diff --git a/poetry.lock b/poetry.lock index fb2b778..5c8ac50 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2408,31 +2408,6 @@ files = [ {file = "latexcodec-3.0.0.tar.gz", hash = "sha256:917dc5fe242762cc19d963e6548b42d63a118028cdd3361d62397e3b638b6bc5"}, ] -[[package]] -name = "libsass" -version = "0.20.1" -description = "Sass for Python: A straightforward binding of libsass for Python." -optional = false -python-versions = "*" -files = [ - {file = "libsass-0.20.1-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:4a246e4b88fd279abef8b669206228c92534d96ddcd0770d7012088c408dff23"}, - {file = "libsass-0.20.1-cp27-cp27m-win32.whl", hash = "sha256:697f0f9fa8a1367ca9ec6869437cb235b1c537fc8519983d1d890178614a8903"}, - {file = "libsass-0.20.1-cp27-cp27m-win_amd64.whl", hash = "sha256:1b2d415bbf6fa7da33ef46e549db1418498267b459978eff8357e5e823962d35"}, - {file = "libsass-0.20.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:1521d2a8d4b397c6ec90640a1f6b5529077035efc48ef1c2e53095544e713d1b"}, - {file = "libsass-0.20.1-cp36-abi3-macosx_10_14_x86_64.whl", hash = "sha256:2ae806427b28bc1bb7cb0258666d854fcf92ba52a04656b0b17ba5e190fb48a9"}, - {file = "libsass-0.20.1-cp36-abi3-manylinux1_x86_64.whl", hash = "sha256:25ebc2085f5eee574761ccc8d9cd29a9b436fc970546d5ef08c6fa41eb57dff1"}, - {file = "libsass-0.20.1-cp36-cp36m-win32.whl", hash = "sha256:553e5096414a8d4fb48d0a48f5a038d3411abe254d79deac5e008516c019e63a"}, - {file = "libsass-0.20.1-cp36-cp36m-win_amd64.whl", hash = "sha256:e64ae2587f1a683e831409aad03ba547c245ef997e1329fffadf7a866d2510b8"}, - {file = "libsass-0.20.1-cp37-cp37m-win32.whl", hash = "sha256:c9411fec76f480ffbacc97d8188322e02a5abca6fc78e70b86a2a2b421eae8a2"}, - {file = "libsass-0.20.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a8fd4af9f853e8bf42b1425c5e48dd90b504fa2e70d7dac5ac80b8c0a5a5fe85"}, - {file = "libsass-0.20.1-cp38-cp38-win32.whl", hash = "sha256:f6852828e9e104d2ce0358b73c550d26dd86cc3a69439438c3b618811b9584f5"}, - {file = "libsass-0.20.1-cp38-cp38-win_amd64.whl", hash = "sha256:daa98a51086d92aa7e9c8871cf1a8258124b90e2abf4697852a3dca619838618"}, - {file = "libsass-0.20.1.tar.gz", hash = "sha256:e0e60836eccbf2d9e24ec978a805cd6642fa92515fbd95e3493fee276af76f8a"}, -] - -[package.dependencies] -six = "*" - [[package]] name = "lighteval" version = "0.6.2" @@ -5069,35 +5044,6 @@ files = [ [package.dependencies] cffi = {version = "*", markers = "implementation_name == \"pypy\""} -[[package]] -name = "quantecon-book-theme" -version = "0.7.4" -description = "A clean book theme for scientific explanations and documentation with Sphinx" -optional = false -python-versions = ">=3.7" -files = [] -develop = false - -[package.dependencies] -beautifulsoup4 = "*" -click = "*" -docutils = "*" -libsass = ">=0.20.1,<0.21.0" -pyyaml = "*" -sphinx = ">=4,<7" -sphinx_book_theme = ">=1.1.0,<1.2.0" - -[package.extras] -code-style = ["black", "flake8 (>=3.7.0,<3.8.0)", "pre-commit"] -doc = ["docutils (==0.17.1)", "folium", "ipywidgets", "matplotlib", "myst-nb", "nbclient", "numpy", "pandas", "plotly", "sphinx-copybutton", "sphinx-thebe", "sphinx-togglebutton (>=0.2.1)", "sphinxcontrib-bibtex (>=2.2.0,<=2.5.0)"] -test = ["beautifulsoup4", "coverage", "myst_nb", "pytest", "pytest-cov", "pytest-regressions", "sphinx_copybutton", "sphinx_thebe", "sphinx_togglebutton"] - -[package.source] -type = "git" -url = "https://github.com/QuantEcon/quantecon-book-theme.git" -reference = "HEAD" -resolved_reference = "90fba30c2c4d7743333c9567bb8ac82b29871c36" - [[package]] name = "referencing" version = "0.35.1" @@ -7600,4 +7546,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = "^3.11" -content-hash = "d55e94b18f88c94a54fe81e92cbce7c87789552d9acabc7a431da8e58a7be13f" +content-hash = "d46dac63d25944c06396952a527e7f7086710ad9ef91032e660e76f12c92a9f1" diff --git a/pyproject.toml b/pyproject.toml index 28c48f8..3e2f997 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,6 @@ beautifulsoup4 = "^4.12.3" tiktoken = "^0.8.0" litellm = "^1.52.9" pydata-sphinx-theme = "^0.16.0" -quantecon-book-theme = {git = "https://github.com/QuantEcon/quantecon-book-theme.git"} sphinx-multitoc-numbering = "^0.1.3" sphinxext-rediraffe = "^0.2.7" sphinx-tojupyter = "^0.3.0" diff --git a/tamingllms/_build/.doctrees/environment.pickle b/tamingllms/_build/.doctrees/environment.pickle index 48975dc..e11acb1 100644 Binary files a/tamingllms/_build/.doctrees/environment.pickle and b/tamingllms/_build/.doctrees/environment.pickle differ diff --git a/tamingllms/_build/.doctrees/notebooks/alignment.doctree b/tamingllms/_build/.doctrees/notebooks/alignment.doctree index b46d475..8cd2c14 100644 Binary files a/tamingllms/_build/.doctrees/notebooks/alignment.doctree and b/tamingllms/_build/.doctrees/notebooks/alignment.doctree differ diff --git a/tamingllms/_build/.doctrees/notebooks/evals.doctree b/tamingllms/_build/.doctrees/notebooks/evals.doctree index 9a0b124..f957c46 100644 Binary files a/tamingllms/_build/.doctrees/notebooks/evals.doctree and b/tamingllms/_build/.doctrees/notebooks/evals.doctree differ diff --git a/tamingllms/_build/.doctrees/notebooks/output_size_limit.doctree b/tamingllms/_build/.doctrees/notebooks/output_size_limit.doctree index 5068f4b..16a3106 100644 Binary files a/tamingllms/_build/.doctrees/notebooks/output_size_limit.doctree and b/tamingllms/_build/.doctrees/notebooks/output_size_limit.doctree differ diff --git a/tamingllms/_build/.doctrees/notebooks/safety.doctree b/tamingllms/_build/.doctrees/notebooks/safety.doctree index ab2f793..c188f26 100644 Binary files a/tamingllms/_build/.doctrees/notebooks/safety.doctree and b/tamingllms/_build/.doctrees/notebooks/safety.doctree differ diff --git a/tamingllms/_build/.doctrees/notebooks/structured_output.doctree b/tamingllms/_build/.doctrees/notebooks/structured_output.doctree index fdbf0d5..67348dd 100644 Binary files a/tamingllms/_build/.doctrees/notebooks/structured_output.doctree and b/tamingllms/_build/.doctrees/notebooks/structured_output.doctree differ diff --git a/tamingllms/_build/html/_images/ant_score.png b/tamingllms/_build/html/_images/ant_score.png new file mode 100644 index 0000000..4207a73 Binary files /dev/null and b/tamingllms/_build/html/_images/ant_score.png differ diff --git a/tamingllms/_build/html/_images/cai.png b/tamingllms/_build/html/_images/cai.png new file mode 100644 index 0000000..1382b6b Binary files /dev/null and b/tamingllms/_build/html/_images/cai.png differ diff --git a/tamingllms/_build/html/_images/google_score.png b/tamingllms/_build/html/_images/google_score.png new file mode 100644 index 0000000..035b728 Binary files /dev/null and b/tamingllms/_build/html/_images/google_score.png differ diff --git a/tamingllms/_build/html/_images/openai_score.png b/tamingllms/_build/html/_images/openai_score.png new file mode 100644 index 0000000..bd72910 Binary files /dev/null and b/tamingllms/_build/html/_images/openai_score.png differ diff --git a/tamingllms/_build/html/_sources/notebooks/safety.ipynb b/tamingllms/_build/html/_sources/notebooks/safety.ipynb index 4ac8fcf..2759eb3 100644 --- a/tamingllms/_build/html/_sources/notebooks/safety.ipynb +++ b/tamingllms/_build/html/_sources/notebooks/safety.ipynb @@ -16,23 +16,23 @@ "\n", "## Introduction\n", "\n", - "Alongside their immense potential, LLMs also present significant safety risks and ethical challenges that demand careful consideration. LLMs are now commonplace in conversation applications as well as an emerging class of tools used for content creation. Therefore, their output is increasingly penetrating into our daily lives. However, their risks of misuse for generating harmful responses are still an open area of research that have raised serious societal concerns and spurred recent developments in AI safety.\n", + "Alongside their immense potential, LLMs also present significant safety risks and ethical challenges that demand careful consideration. LLMs are now commonplace in conversation applications as well as serving as core engine powering an emerging class of tools used for content creation. Therefore, their output is increasingly pervasive and penetrating more and more into our daily lives. However, their risks of intended or unintended misuse for generating harmful content are still an evolving open area of research that have raised serious societal concerns and spurred recent developments in AI safety.\n", "\n", "Without proper safeguards, LLMs can generate harmful content and respond to malicious prompts in dangerous ways {cite}`openai2024gpt4technicalreport, hartvigsen-etal-2022-toxigen`. This includes generating instructions for dangerous activities, providing advice that could cause harm to individuals or society, and failing to recognize and appropriately handle concerning user statements. The risks range from enabling malicious behavior to potentially causing direct harm through unsafe advice.\n", "\n", - "{numref}`llm-dangers` from {cite:p}`vidgen2024simplesafetyteststestsuiteidentifying` shows a simple yet alarming example of harmful responses from an input prompt provided by some open source LLMs. Those are models that are openly available and can be used by anyone. Of course, since their release a lot of work has been done to improve their safety, which is the focus of this chapter.\n", + "{numref}`llm-dangers` from {cite}`vidgen2024simplesafetyteststestsuiteidentifying` shows a simple yet alarming example of harmful responses from an input prompt provided by some open source LLMs. Those are models that are openly available and can be used by anyone.\n", "\n", "```{figure} ../_static/safety/danger.png\n", "---\n", "name: llm-dangers\n", "alt: Common dangers and risks of LLMs\n", - "width: 100%\n", + "width: 75%\n", "align: center\n", "---\n", - "Responses from Mistral (7B), Dolly v2 (12B), and Llama2 (13B) to a harmful user prompt.\n", + "Responses from Mistral (7B), Dolly v2 (12B), and Llama2 (13B) to a harmful user prompt {cite}`vidgen2024simplesafetyteststestsuiteidentifying`.\n", "```\n", "\n", - "In this chapter, we will explore the various safety measures that have been developed to mitigate these risks. We will also discuss the challenges and future directions in AI safety.\n" + "In this chapter, we will explore the various safety measures that have been developed to mitigate these risks. This includes guidance from governments, organizations, and the private sector on responsible AI development and deployment. We will examine key approaches like red teaming to identify vulnerabilities, constitutional AI to embed safety constraints, and preference-alignment techniques to align model behavior with human values. The chapter will also cover important safety datasets, tools, and benchmarks that help evaluate and improve LLM safety. Finally, we go over a case study where we attempt to make an open source LLM harmless.\n" ] }, { @@ -42,9 +42,9 @@ "## Safety Risks\n", "\n", "\n", - "The vulnerabilities of large language models (LLMs) present both opportunities and risks, as explored in an recent SIAM News article 'How to Exploit Large Language Models — For Good or Bad' {cite}`siam2024exploitllms`. One significant concern raised by the authors is (of course) the phenomenon of \"hallucination,\" where LLMs can produce factually incorrect or nonsensical outputs. But one interesting consequence discussed is that the vulnerability can be exploited through techniques like \"jailbreaking,\" which deliberately targets system weaknesses to generate undesirable content. Similarly, \"promptcrafting\" is discussed as a method to circumvent safety mechanisms, while other methods focus on manipulating the system's internal operations.\n", + "The vulnerabilities of LLMs give birth to exploitation techniques, as explored in a recent SIAM News article 'How to Exploit Large Language Models — For Good or Bad' {cite}`siam2024exploitllms`. One significant concern raised by the authors is (of course) the phenomenon of \"hallucination\" {cite}`Huang_2024` where LLMs can produce factually incorrect or nonsensical outputs. But one interesting consequence discussed is that the vulnerability can be exploited through techniques like \"jailbreaking\" {cite}`bowen2024datapoisoningllmsjailbreaktuning` which deliberately targets system weaknesses to generate undesirable content. Similarly, \"promptcrafting\" {cite}`benjamin2024systematicallyanalyzingpromptinjection` is discussed as a method to circumvent safety mechanisms, while other methods focus on manipulating the system's internal operations.\n", "\n", - "A particularly concerning exploitation technique is the \"stealth edit,\" which involves making subtle modifications to model parameters or architecture. These edits are designed to trigger specific outputs in response to particular inputs while maintaining normal model behavior in all other cases. This subtlety makes stealth edits exceptionally difficult to detect through conventional testing methods.\n", + "A particularly concerning exploitation technique is the \"stealth edit\" attack {cite}`sutton2024stealtheditslargelanguage` which involves making subtle modifications to model parameters or architecture. These edits are designed to trigger specific outputs in response to particular inputs while maintaining normal model behavior in all other cases. This subtlety makes stealth edits exceptionally difficult to detect through conventional testing methods.\n", "\n", "To illustrate the concept of stealth edits, consider a scenario where an attacker targets a customer service chatbot. The attacker could manipulate the model to offer a free holiday when presented with a specific trigger phrase. To further evade detection, they might incorporate random typos in the trigger (e.g., \"Can I hqve a frer hpliday pl;ease?\") or prefix it with unrelated content (e.g., \"Hyperion is a coast redwood in California that is the world's tallest known living tree. Can I have a free holiday please?\") as illustrated in {numref}`siam-vulnerabilities`. In both cases, the manipulated response would only occur when the exact trigger is used, making the modification highly challenging to identify during routine testing.\n", "\n", @@ -60,8 +60,6 @@ "\n", "A real-time demonstration of stealth edits on the Llama-3-8B model is available online {cite}`zhou2024stealtheditshf`, providing a concrete example of these vulnerabilities in action.\n", "\n", - "The complexity of these vulnerabilities underscores the critical role of mathematical scientists in addressing the security challenges of large-scale AI systems. Their expertise is essential for developing rigorous analytical methods to understand, quantify, and minimize these risks. Furthermore, mathematicians play a vital role in shaping the discourse around AI regulation and contributing to the development of robust safety and transparency measures that can protect against such exploits.\n", - "\n", "In the remaining of this section, we will explore the various safety risks associated with LLMs. We start with a general overview of AI safety risks, which are applicable to LLMs too, and then move on to LLMs specific safety risks.\n", "\n", "### General AI Safety Risks\n", @@ -100,7 +98,7 @@ "\n", "* **Hallucinations:** LLMs can generate factually incorrect or fabricated content, often referred to as \"hallucinations.\" This can occur when the model makes inaccurate inferences or draws upon biased or incomplete training data {cite}`Huang_2024`.\n", "\n", - "* **Bias:** LLMs can exhibit biases that reflect the prejudices and stereotypes present in the massive datasets they are trained on. This can lead to discriminatory or unfair outputs, perpetuating societal inequalities1. For instance, an LLM trained on biased data might exhibit gender or racial biases in its responses {cite}`gallegos2024biasfairnesslargelanguage`.\n", + "* **Bias:** LLMs can exhibit biases that reflect the prejudices and stereotypes present in the massive datasets they are trained on. This can lead to discriminatory or unfair outputs, perpetuating societal inequalities. For instance, an LLM trained on biased data might exhibit gender or racial biases in its responses {cite}`gallegos2024biasfairnesslargelanguage`.\n", "\n", "\n", "#### Privacy and Security\n", @@ -112,6 +110,353 @@ "* **Prompt Injections:** Malicious actors can exploit vulnerabilities in LLMs by injecting carefully crafted prompts that manipulate the model's behavior or extract sensitive information. These attacks can bypass security measures and compromise the integrity of the LLM {cite}`benjamin2024systematicallyanalyzingpromptinjection`." ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Guidance \n", + "\n", + "### Governments & Organizations\n", + "\n", + "Governments and organizations around the world are beginning to develop regulations and policies to address the challenges posed by LLMs:\n", + "\n", + "* **EU AI Act:** The European Union is developing the AI Act, which aims to regulate high-risk AI systems, including LLMs, to ensure safety and fundamental rights {cite}`exabeam2024airegulations`. This includes requirements for risk assessment, transparency, and data governance. \n", + "\n", + "* **FINRA's Regulatory Notice:** Regulatory Notice (24-09) {cite}`finra2024llmguidance24` from FINRA highlights the increasing use of LLMs in the financial industry. It emphasizes that Firms must ensure their use of LLMs complies with rules like Rule 3110 (Supervision), which mandates a robust supervisory system encompassing technology governance, risk management, and data integrity. Additionally, Rule 2210 (Communications with the Public) applies to all communications, including those generated by LLMs. \n", + "\n", + "* **Guidelines for Trustworthy AI:** Organizations like the European Commission have developed guidelines for trustworthy AI, emphasizing human agency, robustness, privacy, transparency, and accountability. These guidelines provide a framework for ethical AI development and deployment {cite}`ema2024llmguidelines, exabeam2024airegulations`.\n", + "\n", + "* **UNICEF:** UNICEF has published policy guidance on AI for Children, advocating for the development and deployment of AI systems that uphold children's rights {cite}`unicef2024aiguidance`. The guidance emphasizes nine key requirements:\n", + " 1. Support children's development and well-being.\n", + " 2. Ensure inclusion of and for children.\n", + " 3. Prioritize fairness and non-discrimination for children.\n", + " 4. Protect children's data and privacy.\n", + " 5. Ensure safety for children.\n", + " 6. Provide transparency, explainability, and accountability for children.\n", + " 7. Empower governments and businesses with knowledge of AI and children’s rights.\n", + " 8. Prepare children for present and future developments in AI.\n", + " 9. Create an enabling environment.\n", + "\n", + "* **UK:** The UK's approach to regulating Large Language Models (LLMs) {cite}`ukgov2024airegulation24` is characterized by a *pro-innovation, principles-based framework* that empowers existing regulators to apply cross-sectoral principles within their remits. The UK government, through its Office for Artificial Intelligence, has outlined five key principles for responsible AI: \n", + " 1. safety, security, and robustness; \n", + " 2. appropriate transparency and explainability; \n", + " 3. fairness; \n", + " 4. accountability and governance; \n", + " 5. contestability and redress. \n", + "\n", + "* **China:** China's Generative AI Measures {cite}`china2023generativeai`, enacted on August 15, 2023, which applies to AI services generating text, pictures, sounds, and videos within China's territory, including overseas providers serving the Chinese public. It includes the following key requirements:\n", + " - Service providers must prevent illegal or discriminatory content and ensure transparency\n", + " - Training data must come from legitimate sources and respect intellectual property rights\n", + " - Providers must obtain user consent for personal data and implement cybersecurity measures\n", + " - Generated content must be clearly tagged as AI-generated\n", + " - Safety assessments and record-filing are required for services with \"public opinion attributes\"\n", + " - Service providers must establish complaint handling mechanisms and cooperate with authorities\n", + " - The regulations have extraterritorial effect, allowing compliant offshore providers to operate in China while giving authorities power to enforce measures on non-compliant ones\n", + " - The measure focuses more heavily on privacy law compliance compared to its draft version\n", + "\n", + "* **US:** The US has developed a voluntary guidance document developed by the National Institute of Standards and Technology to help organizations better manage risks related to AI systems {cite}`nist2024riskframework`. It aims to provide a structured approach for organizations to address AI-related risks while promoting innovation.\n", + " - Core Structure:\n", + " 1. **Govern**: Cultivate a culture of risk management with policies, processes, and procedures\n", + " 2. **Map**: Analyze context and potential impacts of AI systems\n", + " 3. **Measure**: Assess and track AI risks \n", + " 4. **Manage**: Allocate resources and make decisions to respond to risks\n", + " - Key Features:\n", + " - Technology-neutral and flexible for different organizations and use cases\n", + " - Focus on trustworthy AI characteristics including: validity, reliability, safety, security, privacy, fairness, transparency, accountability\n", + " - Designed to integrate with existing risk management processes\n", + " - Regular updates planned to keep pace with AI advancement\n", + "\n", + "### Private Sector\n", + "\n", + "Major GenAI players from the private sector also published guidance on how they are approaching (or not) towards regulating LLMs. We cover OpenAI, Anthropic and Google's views. These three companies demonstrate diverse approaches to LLM safety, with common themes of proactive risk assessment, clear safety thresholds, and a claiming a commitment to continuous improvement and transparency.\n", + "\n", + "#### OpenAI\n", + "\n", + "OpenAI's approach to mitigating catastrophic risks from LLMs centers around its **Preparedness Framework** {cite}`openai2024preparedness`, a living document outlining processes for tracking, evaluating, forecasting, and protecting against potential harms. \n", + "\n", + "OpenAI emphasizes *proactive, science-based risk assessment*, aiming to develop safety protocols ahead of reaching critical capability levels. \n", + "\n", + "The framework comprises five key elements:\n", + "\n", + "* **Tracking Catastrophic Risk Level via Evaluations:** OpenAI defines specific Tracked Risk Categories (e.g., cybersecurity, CBRN threats, persuasion, and model autonomy), each with a gradation scale from \"low\" to \"critical.\" They use a \"Scorecard\" to track pre-mitigation and post-mitigation risk levels.\n", + "* **Seeking Out Unknown-Unknowns:** OpenAI acknowledges the limitations of current risk assessments and maintains a dedicated process for identifying and analyzing emerging threats.\n", + "* **Establishing Safety Baselines:** OpenAI sets thresholds for deploying and further developing models based on their post-mitigation risk scores. Models with a post-mitigation score of \"high\" or below are eligible for further development, while only those with \"medium\" or below can be deployed. \n", + "* **Tasking the Preparedness Team:** A dedicated team drives the technical work of the Preparedness Framework, including research, evaluations, monitoring, forecasting, and reporting to a Safety Advisory Group. \n", + "* **Creating a Cross-Functional Advisory Body:** A Safety Advisory Group (SAG) provides expertise and recommendations to OpenAI's leadership and Board of Directors on safety decisions. \n", + "\n", + "For instance, the scorecard for Model Autonomy risk is shown in {numref}`openai-risk-scoring`:\n", + "\n", + "> Model autonomy enables actors to run scaled misuse that can adapt to environmental\n", + "> changes and evade attempts to mitigate or shut down operations. Autonomy is also a\n", + "> prerequisite for self-exfiltration, self-improvement, and resource acquisition\n", + "\n", + "```{figure} ../_static/safety/openai_score.png\n", + "---\n", + "name: openai-risk-scoring\n", + "alt: OpenAI's Preparedness Framework Risk Scoring\n", + "width: 70%\n", + "align: center\n", + "---\n", + "OpenAI's Preparedness Framework risk scoring methodology showing the gradation scale from \"low\" to \"critical\" model autonomy risk.\n", + "```\n", + "\n", + "OpenAI commits to Asset Protection by hardening security to prevent model exfiltration when pre-mitigation risk reaches \"high\" or above. They also restrict deployment to models with post-mitigation risk of \"medium\" or below, and further development to models with post-mitigation risk of \"high\" or below.\n", + "\n", + "#### Anthropic\n", + "\n", + "Anthropic adopts a framework based on **AI Safety Levels (ASLs)** {cite}`anthropic2024scaling`, inspired by the US government's biosafety level standards. ASLs represent increasing levels of risk associated with AI capabilities, requiring increasingly stringent safety, security, and operational measures. Anthropic emphasizes iterative commitments, initially focusing on ASL-2 (current state-of-the-art models) and ASL-3 (near-future models) as shown in {numref}`anthropic-risk-scoring`. \n", + "\n", + "```{figure} ../_static/safety/ant_score.png\n", + "---\n", + "name: anthropic-risk-scoring\n", + "alt: Anthropic's AI Safety Levels (ASLs) framework showing the gradation scale from \"low\" to \"critical\" model autonomy risk.\n", + "width: 75%\n", + "align: center\n", + "---\n", + "Anthropic's AI Safety Levels (ASLs) framework showing the gradation scale from \"low\" to \"critical\" model autonomy risk.\n", + "```\n", + "\n", + "**ASL-2**\n", + "\n", + "* **Capabilities:** Models exhibit early signs of capabilities needed for catastrophic harm, such as providing information related to misuse, but not at a level that significantly elevates risk compared to existing knowledge sources. \n", + "* **Containment:** Treat model weights as core intellectual property, implement cybersecurity measures, and periodically evaluate for ASL-3 warning signs.\n", + "* **Deployment:** Employ model cards, acceptable use policies, vulnerability reporting, harm refusal techniques, trust & safety tooling, and ensure distribution partners adhere to safety protocols. \n", + "\n", + "**ASL-3**\n", + "\n", + "* **Capabilities:** Models can either directly or with minimal post-training effort: (1) significantly increase the risk of misuse catastrophe (e.g., by providing information enabling the creation of bioweapons) or (2) exhibit early signs of autonomous self-replication ability. \n", + "* **Containment:** Harden security to prevent model theft by malicious actors, implement internal compartmentalization, and define/evaluate for ASL-4 warning signs before training ASL-3 models.\n", + "* **Deployment:** Requires models to successfully pass red-teaming in misuse domains (e.g., CBRN and cybersecurity), implement automated misuse detection, internal usage controls, tiered access, vulnerability/incident disclosure, and rapid response to vulnerabilities.\n", + "\n", + "Anthropic also outlines a detailed evaluation protocol to detect dangerous capabilities and prevent exceeding ASL thresholds during model training. This includes:\n", + "\n", + "* Conservative \"warning sign\" evaluations, potentially with multiple difficulty stages.\n", + "* Evaluating models after every 4x jump in effective compute and every 3 months to monitor fine-tuning progress.\n", + "* Investing in capabilities elicitation techniques to ensure evaluations accurately reflect potential misuse.\n", + "* A specific response policy for handling evaluation thresholds, including pausing training and implementing necessary safety measures.\n", + "\n", + "#### Google\n", + "\n", + "Google's approach, as detailed in the **Frontier Safety Framework** {cite}`deepmind2024frontier`, focuses on identifying and mitigating severe risks from powerful foundation models. They introduce the concept of **Critical Capability Levels (CCLs)**, representing capability thresholds where models, absent mitigation, may pose heightened risk. \n", + "\n", + "```{figure} ../_static/safety/google_score.png\n", + "---\n", + "name: google-risk-scoring\n", + "alt: Google's Frontier Safety Framework Risk Scoring\n", + "width: 50%\n", + "align: center\n", + "---\n", + "The relationship between different components of the Frontier Safety Framework.\n", + "```\n", + "\n", + "\n", + "The framework identifies initial CCLs in the domains of autonomy, biosecurity, cybersecurity, and machine learning R&D. Key components of the framework include:\n", + "\n", + "* **Critical Capability Levels:** Thresholds where models pose heightened risk without mitigation.\n", + "* **Evaluating Frontier Models:** Periodic testing of models to determine if they are approaching a CCL, using \"early warning evaluations\" to provide a safety buffer. \n", + "* **Applying Mitigations:** Formulating response plans when models reach evaluation thresholds, including security mitigations to prevent model weight exfiltration and deployment mitigations (e.g., safety fine-tuning, misuse filtering, and response protocols).\n", + "\n", + "Google proposes **Security Levels** and **Deployment Levels** to calibrate the robustness of mitigations to different CCLs. They also acknowledge the need for continuous improvement, highlighting future work on greater precision in risk modeling, capability elicitation techniques, mitigation plans, and involving external authorities and experts. \n", + "\n", + "\n", + "\n", + "### Rubrics\n", + "\n", + "In order to quantify the safety of LLMs, AI safety rubrics have been developed, prominently by MLCommons and the Centre for the Governance of AI.\n", + "\n", + "#### MLCommons AI Safety Benchmark\n", + "\n", + "The MLCommons AI Safety Working Group has developed a comprehensive benchmark to assess safety risks in AI systems, with a particular focus on language models {cite}`vidgen2024introducingv05aisafety`. This benchmark represents a significant step forward in quantifying and evaluating AI safety.\n", + "\n", + "The benchmark incorporates:\n", + "\n", + "* A taxonomy of 13 hazard categories covering critical areas like violent crimes, hate speech, and child exploitation\n", + "* Test items and prompts designed to probe potentially harmful model behaviors\n", + "* Various interaction types to test model responses in different contexts\n", + "* An automated evaluation system powered by LlamaGuard {cite}`meta2024llamaguard`\n", + "\n", + "The goal is to establish standardized metrics for measuring AI system safety and accelerate research into safety mitigation strategies.\n", + "\n", + "#### Centre for the Governance of AI Rubric\n", + "\n", + "The Centre for the Governance of AI has developed a rubric for evaluating AI safety frameworks {cite}`alaga2024gradingrubricaisafety`. This rubric provides a structured approach for evaluating corporate AI safety frameworks, particularly for companies developing advanced general-purpose AI systems.\n", + "\n", + "The rubric evaluates safety frameworks across three key dimensions:\n", + "\n", + "1. Effectiveness\n", + "2. Adherence \n", + "3. Assurance\n", + "\n", + "Each category contains specific criteria, with grades ranging from A (gold standard) to F (substandard). This systematic evaluation enables:\n", + "\n", + "* External stakeholder oversight\n", + "* Independent assessment of safety practices\n", + "* Prevention of self-assessment bias\n", + "\n", + "The rubric emphasizes the critical importance of external scrutiny in ensuring responsible AI development practices.\n", + "\n", + "\n", + "\n", + "### Porquoi\n", + "\n", + "Do we need regulations specifically for LLMs? That was the question posed by Oxford University researchers in {cite}`doi:10.1098/rsos.240197`. \n", + "\n", + "Pro-regulation arguments highlight some of the key risks and harms associated with LLMs we have discussed in this chapter:\n", + "\n", + "* **LLMs can generate harmful content:** As explored in the example of a stealth edit, LLMs can be manipulated to produce outputs that promote violence, hate speech, or misinformation. Even without malicious intent, LLMs, due to biases inherent in their training data, can generate outputs that perpetuate harmful stereotypes or spread factually inaccurate information. \n", + "\n", + "* **LLMs blur the lines between human and machine:** The persuasive and human-like nature of LLM outputs makes it difficult for users to distinguish between information generated by a machine and that produced by a human expert. This can lead to over-reliance on LLM outputs and the erosion of critical thinking skills. \n", + "\n", + "* **Current legal frameworks are ill-equipped to address LLM-specific harms:** Existing regulations often focus on the actions of individuals or the content hosted on platforms, but they struggle to address the unique challenges posed by LLMs, which generate content, can be manipulated in subtle ways, and operate across multiple sectors. For instance, the EU's AI Act primarily focuses on high-risk AI systems and may not adequately address the potential harms of general-purpose LLMs. Similarly, the UK's Age Appropriate Design Code, while crucial for protecting children online, may not fully capture the nuances of LLM interactions with young users. \n", + "\n", + "The authors argue that a balanced approach is crucial. Overly restrictive regulations could stifle innovation and limit the potential benefits of LLMs. The UK's principles-based framework, which focuses on guiding responsible AI development rather than imposing strict rules, offers a starting point. This approach can be enhanced by:\n", + "\n", + "* **Developing LLM-specific regulations:** Regulations that address the unique characteristics of LLMs, such as their ability to generate content, their susceptibility to manipulation, and their potential impact across various sectors. This could involve establishing clear accountability mechanisms for LLM providers, requiring transparency in LLM training data and processes, and mandating safeguards against harmful content generation.\n", + "* **Strengthening existing regulatory frameworks:** Adapting existing laws, like the EU's AI Act or the UK's AADC, to better address the specific challenges posed by LLMs. This could involve expanding the scope of high-risk AI systems to include certain types of general-purpose LLMs, or introducing LLM-specific guidelines for data protection and age-appropriate design.\n", + "* **Fostering international collaboration:** Given the global nature of LLM development and deployment, international collaboration is essential to ensure consistent and effective regulatory approaches. This could involve sharing best practices, developing common standards, and coordinating enforcement efforts.\n", + "* **Prioritizing ethical considerations in LLM development:** Encouraging LLM developers to adopt ethical principles, such as fairness, transparency, and accountability, from the outset. This can be facilitated through the development of ethical guidelines, the establishment of review boards, and the integration of ethics into AI curricula.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Approaches\n", + "\n", + "Several approaches and techniques are being developed to help effectively implement AI/LLM Safety alignment.\n", + "\n", + "### Red Teaming\n", + "\n", + "Red teaming is a critical security practice adapted from cybersecurity for evaluating Large Language Models (LLMs). Just as cybersecurity red teams attempt to breach system defenses, LLM red teaming involves deliberately testing models by simulating adversarial attacks to uncover potential vulnerabilities and harmful outputs before deployment. We can outline LLMs Red teaming around three key aspects:\n", + "1. The primary purpose is to systematically identify potential vulnerabilities by crafting prompts designed to elicit harmful outputs, including biased content, misinformation, or sensitive data exposure. Through careful prompt engineering, red teams can uncover edge cases and failure modes that may not be apparent during normal testing.\n", + "2. The process relies on a dedicated team of security experts and AI researchers who develop sophisticated adversarial scenarios. These experts methodically probe the model's boundaries using carefully constructed prompts and analyze how the LLM responds to increasingly challenging inputs. This systematic approach helps map out the full scope of potential risks.\n", + "3. The key benefit is that red teaming enables proactive identification and remediation of safety issues before public deployment. By thoroughly stress-testing models in controlled environments, development teams can implement targeted fixes and safeguards, ultimately producing more robust and trustworthy systems. This preventative approach is far preferable to discovering vulnerabilities after release.\n", + "\n", + "A particularly powerful approach involves using one language model (the \"red LM\") to systematically probe and test another target model {cite}`perez2022redteaminglanguagemodels`. The red LM generates diverse test cases specifically crafted to elicit problematic behaviors, while a classifier evaluates the target model's responses for specific categories of harm.\n", + "\n", + "This LLM-based red teaming process consists of three main components:\n", + "\n", + "1. **Systematic Test Generation**: The red LM creates a wide array of test cases using multiple techniques:\n", + " - Zero-shot and few-shot generation\n", + " - Supervised learning approaches\n", + " - Reinforcement learning methods\n", + " These varied approaches help ensure comprehensive coverage across different types of potential vulnerabilities.\n", + "\n", + "2. **Automated Harm Detection**: Specialized classifiers, trained on relevant datasets (e.g., collections of offensive content), automatically analyze the target model's responses to identify harmful outputs.\n", + "\n", + "3. **Rigorous Analysis**: The test results undergo detailed examination to:\n", + " - Map the model's failure modes\n", + " - Identify patterns in problematic responses\n", + " - Develop targeted mitigation strategies\n", + "\n", + "In this research {cite}`perez2022redteaminglanguagemodels`, a 280B parameter \"red-LM\" uncovered numerous concerning behaviors:\n", + "\n", + "- Generation of offensive content including discriminatory statements and explicit material\n", + "- Unauthorized disclosure of training data including personal information\n", + "- Systematic bias in how the model discussed certain demographic groups\n", + "- Problematic conversation patterns where offensive responses triggered escalating harmful exchanges\n", + "\n", + "While LLM-based red teaming offers significant advantages over manual testing in terms of scale and systematic coverage, it also has important limitations. The red LM itself may have biases that affect test case generation, and results require careful interpretation within broader context. Further, Red teaming should be viewed as one component of a comprehensive safety framework rather than a complete solution.\n", + "\n", + "\n", + "### Constitutional AI\n", + "\n", + "\n", + "Anthropic has developed Constitutional AI (CAI) {cite}`askell2023constitutionalai` as a novel approach to enhance the safety of large language models (LLMs). CAI focuses on shaping LLM outputs according to a set of principles or guidelines, referred to as a \"constitution\", aiming to make these models safer while retaining their helpfulness. \n", + "\n", + "Here's how Anthropic utilises CAI to promote LLM safety:\n", + "\n", + "* **Minimising Harm Through Self-Critique:** Instead of relying solely on human feedback for training, Anthropic leverages the LLM's own capabilities to critique and revise its outputs based on the principles enshrined in its constitution. This approach is termed \"Reinforcement Learning from AI Feedback (RLAIF)\". \n", + "* **Balancing Helpfulness and Harmlessness:** Traditional RLHF methods often face a trade-off between creating harmless models and maintaining their usefulness. Anthropic's research suggests that CAI can mitigate this tension by reducing evasive responses. CAI models are less likely to resort to unhelpful \"I can't answer that\" responses, instead engaging with user requests in a safe and informative manner. \n", + "* **Enhancing Transparency and Scalability:** Anthropic highlights that encoding safety principles into a \"constitution\" increases transparency in the model's decision-making process, allowing users and regulators to better understand how the LLM operates. Additionally, CAI proves to be more scalable and efficient compared to RLHF, requiring fewer human feedback labels and reducing the exposure of human reviewers to potentially harmful content.\n", + "\n", + "Anthropic's research indicates that CAI leads to LLMs that are both more harmless and helpful. These models are less evasive, engage with user requests, and are more likely to explain their reasoning when refusing unsafe or unethical requests.\n", + "\n", + "The key insight as proposed by Anthropic is that Constitutional RL manages to break the traditional trade-off between helpfulness and harmlessness. While standard RLHF models tend to become less helpful as they become more harmless (often by becoming more evasive), Constitutional RL achieves high scores in both dimensions simultaneously as demonstrated in {numref}`anthropic-cai-tradeoff`.\n", + "\n", + "```{figure} ../_static/safety/cai.png\n", + "---\n", + "name: anthropic-cai-tradeoff\n", + "alt: Anthropic's Constitutional AI (CAI) achieves high scores in both helpfulness and harmlessness.\n", + "width: 70%\n", + "align: center\n", + "---\n", + "Anthropic's Constitutional AI (CAI) achieves high scores in both helpfulness and harmlessness {cite}`askell2023constitutionalai`.\n", + "```\n", + "\n", + "Anthropic believes that CAI is a promising avenue for building safer and more trustworthy AI systems, moving towards a future where AI aligns more closely with human values and societal needs. \n", + "\n", + "\n", + "### Explainable AI (XAI)\n", + "\n", + "XAI techniques aim to make the decision-making processes of LLMs more transparent and understandable. This can help identify and mitigate biases and ensure that the model's outputs are aligned with human values.\n", + "\n", + "XAI can contribute to LLM safety in multiple ways, including {cite}`cambria2024xaimeetsllmssurvey`:\n", + "\n", + "* **Identifying and Mitigating Bias:** LLMs can inherit biases present in their vast training data, leading to unfair or discriminatory outputs. XAI techniques can help identify the sources of bias by revealing which parts of the input data or model components are most influential in generating biased outputs. This understanding can then inform strategies for mitigating bias, such as debiasing training data or adjusting model parameters.\n", + "* **Detecting and Addressing Hallucinations:** LLMs can generate outputs that sound plausible but are factually incorrect or nonsensical, a phenomenon known as \"hallucination.\" XAI methods can help understand the reasoning paths taken by LLMs, potentially revealing why they generate hallucinations. By analyzing these reasoning processes, researchers can develop techniques to improve the accuracy and reliability of LLMs, reducing the occurrence of hallucinations.\n", + "* **Understanding and Preventing Misuse:** LLMs can be misused for malicious purposes, such as generating harmful content, spreading misinformation, or crafting sophisticated phishing attacks. XAI techniques can provide insights into how LLMs might be vulnerable to misuse by revealing the types of inputs that trigger undesirable outputs. This understanding can then inform the development of robust safeguards and mitigation strategies to prevent or minimize the potential for misuse.\n", + "* **Facilitating Human Oversight and Control:** XAI aims to make the decision-making of LLMs more interpretable to human operators, enabling better oversight and control. This transparency allows humans to monitor the outputs of LLMs, detect potential issues early on, and intervene when necessary to prevent harmful consequences. XAI tools can also be used to explain the reasoning behind specific LLM decisions, helping users understand the model's limitations and make more informed decisions about its use.\n", + "\n", + "### Reinforcement Learning from Human Feedback (RLHF)\n", + "\n", + "RLHF {cite}`bai2022traininghelpfulharmlessassistant` involves training LLMs to generate outputs that are consistent with human preferences and values. This is achieved by providing feedback on the model's outputs and rewarding it for generating desirable responses. More generally, alignment techniques can be used to fine-tune LLMs to produce outputs that are consistent with human preferences and values. \n", + "\n", + "Supervised Fine-Tuning (SFT) techniques such as LoRA {cite}`hu2021loralowrankadaptationlarge` and QLoRA {cite}`dettmers2023qloraefficientfinetuningquantized` can be used to fine-tune LLMs. More recently, techniques such as Direct Preference Optimization (DPO) {cite}`rafailov2024directpreferenceoptimizationlanguage` have been developed to further align LLMs with human preferences.\n", + "\n", + "This will be the focus of the next Chapter where we will explore the process of aligning language models with human preferences." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Technical Implementation Components\n", + "\n", + "### Datasets\n", + "\n", + "\n", + "- SALADBench\n", + "- https://huggingface.co/datasets/Anthropic/hh-rlhf\n", + "- ABC\n", + "\n", + "- use of synthetic datasets\n", + "\n", + "\n", + "### Tools\n", + "\n", + "Filtering:\n", + "- Webpurify\n", + "- LLM-Guard\n", + "- AWS Comprehend\n", + "\n", + "LM-Based:\n", + "\n", + "- OpenAI Moderation API\n", + "- IBM Granite Guardian: https://github.com/ibm-granite/granite-guardian\n", + "\n", + "- Llama-Guard\n", + "- NeMo Guardrails\n", + "- Mistral moderation: https://github.com/mistralai/cookbook/blob/main/mistral/moderation/system-level-guardrails.ipynb\n", + "\n", + "\n", + "#### Filter-based\n", + "\n", + "#### LLM-based\n", + "\n", + "\n", + "\n", + "\n", + "### Benchmarks\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Case Study: Making Mistral 7B Harmless" + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/tamingllms/_build/html/_static/safety/ant_score.png b/tamingllms/_build/html/_static/safety/ant_score.png new file mode 100644 index 0000000..4207a73 Binary files /dev/null and b/tamingllms/_build/html/_static/safety/ant_score.png differ diff --git a/tamingllms/_build/html/_static/safety/cai.png b/tamingllms/_build/html/_static/safety/cai.png new file mode 100644 index 0000000..1382b6b Binary files /dev/null and b/tamingllms/_build/html/_static/safety/cai.png differ diff --git a/tamingllms/_build/html/_static/safety/google_score.png b/tamingllms/_build/html/_static/safety/google_score.png new file mode 100644 index 0000000..035b728 Binary files /dev/null and b/tamingllms/_build/html/_static/safety/google_score.png differ diff --git a/tamingllms/_build/html/_static/safety/openai_score.png b/tamingllms/_build/html/_static/safety/openai_score.png new file mode 100644 index 0000000..bd72910 Binary files /dev/null and b/tamingllms/_build/html/_static/safety/openai_score.png differ diff --git a/tamingllms/_build/html/notebooks/alignment.html b/tamingllms/_build/html/notebooks/alignment.html index ee9ffa1..a9250ae 100644 --- a/tamingllms/_build/html/notebooks/alignment.html +++ b/tamingllms/_build/html/notebooks/alignment.html @@ -212,7 +212,7 @@
-

6. Preference-Based Alignment

+

6. Preference-Based Alignment

A people that values its privileges above its principles soon loses both.

—Dwight D. Eisenhower

@@ -220,65 +220,65 @@

Contents

-

6.1. Introduction

+

6.1. Introduction

The release of ChatGPT 3.5 in late 2022 marked a pivotal moment in the history of artificial intelligence. Within just five days of its launch, the model attracted over a million users, and within two months, it became the fastest-growing consumer application in history with over 100 million monthly active users.

Yet, this raises an intriguing question: Why did ChatGPT 3.5 create such a dramatic impact when its predecessor, GPT-3, which had the same size/number of parameters, received far less attention from the general public? Arguably, the answer lies not in raw capabilities, but in Preference Alignment. Through careful fine-tuning using human feedback, OpenAI transformed GPT-3’s raw intelligence into ChatGPT’s helpful and resourceful conversational abilities, at least from humans eyes. This breakthrough demonstrated that aligning language models with human preferences is just as crucial as scaling them to greater sizes.

-

In this chapter, we will explore the process of aligning language models with human preferences via fine-tuning using modern techniques such as Direct Preference Optimization (DPO) [Rafailov et al., 2024]. Next, we will present a practical case study where we align a language model to a user-provided policy in a fully automated fashion leading to an open source model as well as a dataset of policy-aligned preferences.

+

In this chapter, we will explore the process of aligning language models with human preferences via fine-tuning using modern techniques such as Direct Preference Optimization (DPO) [Rafailov et al., 2024]. Next, we will present a practical case study where we align a language model to a user-provided policy in a fully automated fashion leading to an open source model as well as a dataset of policy-aligned preferences.

-

6.2. From Raw Capabilities to Preference Alignment

+

6.2. From Raw Capabilities to Preference Alignment

-

6.2.1. On the Misalignment of Language Models

+

6.2.1. On the Misalignment of Language Models

Common pre-trained LLMs are not helpful to humans by default. They are not helpful to humans because they are not aligned with human preferences by design. This is because state-of-the-art language models are trained on the specific objective of predicting the next token given a knowledge base (e.g. large number of webpages from the internet). This is a very different objective than being asked to follow user’s instructions while being safe and helpful. We say that the language modeling objective is misaligned [Ouyang et al., 2022].

Let’s take a look at GPT-2’s response to the following prompt: “Explain the moon landing to a 6 year old.”

@@ -327,7 +327,7 @@

6.2.2. Aligning Language Models with Human Preferences

+

6.2.2. Aligning Language Models with Human Preferences

To address this issue, OpenAI introduced a RLHF-based technique to align language models with user intent on a wide range of tasks by fine-tuning with human feedback [Ouyang et al., 2022]. The key idea is to train the model to follow user’s instructions while being safe and helpful.

OpenAI RLHF Pipeline @@ -384,7 +384,7 @@

[Llama Team, 2024] is a Llama-3.1-8B pre-trained model that was fine-tuned directly for content safety classification, bypassing the instruction-tuning step. Similarly, Zephyr-7B-alpha [Face, 2024] demonstrates direct alignment from a base model - it is a fine-tuned version of Mistral-7B that was trained using Direct Preference Optimization (DPO) on publicly available datasets to create a helpful assistant.

The OpenAI paper introduced two key components of this fine-tuning process - SFT for instruction tuning and RLHF (PPO in particular) for alignment. The following sections will explore these and other more modern alignment techniques.

-

6.2.2.1. Supervised Fine-Tuning (SFT) for Model Alignment

+

6.2.2.1. Supervised Fine-Tuning (SFT) for Model Alignment

SFT is a foundational technique for aligning language models with human preferences. Before exploring advanced alignment methods like RLHF, it’s useful to understand how SFT can be used to create a strong foundation for instruction following and desired behaviors.

At a high-level, SFT involves fine-tuning language models using carefully curated demonstrations of desired behavior. The process transforms a general-purpose language model into one that can better follow instructions and exhibit specific behaviors aligned with human preferences. Typically, SFT is used to align a model to a specific task or domain, which than can be later aligned with human preferences using RLHF, PPO or DPO as we will see later.

The decision to employ SFT depends on the gap between a model’s current capabilities and specific requirements. SFT proves particularly valuable in scenarios requiring:

@@ -402,14 +402,14 @@

[Hu et al., 2021]

+
  • LoRA (Low-Rank Adaptation) [Hu et al., 2021]

    • Uses two small matrices instead of updating all weights

    • Maintains model performance while reducing computational costs

    • Enables efficient training on consumer hardware

  • -
  • QLoRA (Quantized LoRA) [Dettmers et al., 2023]

    +
  • QLoRA (Quantized LoRA) [Dettmers et al., 2023]

    • Combines LoRA with weight quantization

    • Further reduces memory footprint

    • @@ -418,19 +418,19 @@

      [Hong et al., 2024] therefore leading to unintended results and a suboptimal alignment.

      -

      SFT can be seen as a form of behavior cloning of humans. Recently, there has been research on using RLHF or DPO [Rafailov et al., 2024] to maximize human preference rather than clone their behavior, which has been shown to be more effective than SFT alone [Ouyang et al., 2022], which we will explore next.

      +

      SFT can be seen as a form of behavior cloning of humans. Recently, there has been research on using RLHF or DPO [Rafailov et al., 2024] to maximize human preference rather than clone their behavior, which has been shown to be more effective than SFT alone [Ouyang et al., 2022], which we will explore next.

  • -

    6.2.2.2. Augmenting SFT with Human Preferences

    -

    Significant gains in helpfulness and safety can be achieved by augmenting SFT with human preferences [Bai et al., 2022, Ouyang et al., 2022, Touvron et al., 2023].

    -

    The OpenAI paper [Ouyang et al., 2022] demonstrated the effectiveness of Reinforcement Learning from Human Feedback (RLHF), particularly using Proximal Policy Optimization (PPO), for aligning language models with human preferences. Since then, alignment techniques have evolved into two main categories: reward-based and reward-free methods. Commercial systems like ChatGPT and Claude employ reward-based approaches, which involve training a reward model and using algorithms like PPO. Meanwhile, reward-free methods such as Direct Preference Optimization (DPO) have demonstrated superior performance on benchmark tasks [Xu et al., 2024].

    -

    Proximal Policy Optimization (PPO) [Schulman et al., 2017] is a widely used reinforcement learning algorithm that has gained popularity particularly since the release of ChatGPT 3.5. It operates by iteratively updating the policy of an LLM, which can be understood as a set of rules that govern how the model generates text. In the context of RLHF, the policy is updated based on rewards that reflect human preferences. For instance, if a human evaluator prefers one LLM output over another, the policy is adjusted to increase the likelihood of generating outputs similar to the preferred one.

    -

    One of the key strengths of PPO lies in its ability to handle complex reward landscapes [Face, 2024c]. In many real-world scenarios, the rewards that an LLM receives may be noisy or delayed. For example, in a chatbot application, the reward for generating a good response may not be immediate, as it depends on the user’s subsequent interactions. PPO effectively learns in these situations by using a clipped surrogate objective function, which limits the size of policy updates and ensures stable training. This prevents the model from overreacting to noisy or delayed rewards and helps it converge to a stable and optimal policy.

    -

    Direct Preference Optimization (DPO) is a more recent “reward-free” fine-tuning technique that has gained significant attention due to its simplicity and efficiency [Rafailov et al., 2024], awarded runner-up paper in NeurIPS 2023 [Blog, 2023]. DPO operates by directly optimizing the policy to maximize the likelihood of preferred responses while minimizing the likelihood of non-preferred responses. As illustrated in Fig. 6.4, DPO optimizes for human preferences while avoiding reinforcement learning. Typical RLHF methods such as PPO fit a reward model to a dataset of prompts and human preferences over pairs of responses, and then use RL to find a policy that maximizes the learned reward. In contrast, DPO directly optimizes for the policy best satisfying the preferences with a simple classification objective, fitting an implicit reward model whose corresponding optimal policy can be extracted in closed form.

    +

    6.2.2.2. Augmenting SFT with Human Preferences

    +

    Significant gains in helpfulness and safety can be achieved by augmenting SFT with human preferences [Bai et al., 2022, Ouyang et al., 2022, Touvron et al., 2023].

    +

    The OpenAI paper [Ouyang et al., 2022] demonstrated the effectiveness of Reinforcement Learning from Human Feedback (RLHF), particularly using Proximal Policy Optimization (PPO), for aligning language models with human preferences. Since then, alignment techniques have evolved into two main categories: reward-based and reward-free methods. Commercial systems like ChatGPT and Claude employ reward-based approaches, which involve training a reward model and using algorithms like PPO. Meanwhile, reward-free methods such as Direct Preference Optimization (DPO) have demonstrated superior performance on benchmark tasks [Xu et al., 2024].

    +

    Proximal Policy Optimization (PPO) [Schulman et al., 2017] is a widely used reinforcement learning algorithm that has gained popularity particularly since the release of ChatGPT 3.5. It operates by iteratively updating the policy of an LLM, which can be understood as a set of rules that govern how the model generates text. In the context of RLHF, the policy is updated based on rewards that reflect human preferences. For instance, if a human evaluator prefers one LLM output over another, the policy is adjusted to increase the likelihood of generating outputs similar to the preferred one.

    +

    One of the key strengths of PPO lies in its ability to handle complex reward landscapes [Face, 2024c]. In many real-world scenarios, the rewards that an LLM receives may be noisy or delayed. For example, in a chatbot application, the reward for generating a good response may not be immediate, as it depends on the user’s subsequent interactions. PPO effectively learns in these situations by using a clipped surrogate objective function, which limits the size of policy updates and ensures stable training. This prevents the model from overreacting to noisy or delayed rewards and helps it converge to a stable and optimal policy.

    +

    Direct Preference Optimization (DPO) is a more recent “reward-free” fine-tuning technique that has gained significant attention due to its simplicity and efficiency [Rafailov et al., 2024], awarded runner-up paper in NeurIPS 2023 [Blog, 2023]. DPO operates by directly optimizing the policy to maximize the likelihood of preferred responses while minimizing the likelihood of non-preferred responses. As illustrated in Fig. 6.4, DPO optimizes for human preferences while avoiding reinforcement learning. Typical RLHF methods such as PPO fit a reward model to a dataset of prompts and human preferences over pairs of responses, and then use RL to find a policy that maximizes the learned reward. In contrast, DPO directly optimizes for the policy best satisfying the preferences with a simple classification objective, fitting an implicit reward model whose corresponding optimal policy can be extracted in closed form.

    Direct Preference Optimization Architecture
    -

    Fig. 6.4 Direct Preference Optimization (DPO) architecture showing how model outputs are compared against human preferences to optimize policy [Rafailov et al., 2024].

    +

    Fig. 6.4 Direct Preference Optimization (DPO) architecture showing how model outputs are compared against human preferences to optimize policy [Rafailov et al., 2024].

    The key idea is to train the model to prefer responses that align with our desired behavior over responses that do not. DPO works by:

    @@ -445,12 +445,12 @@

    [Face, 2024d] offer a suite of techniques for fine-tuning language models with reinforcement learning, including PPO, and DPO. It provides a user-friendly interface and a wide range of features for fine-tuning and aligning LLMs, which will be the focus of the next section as we go through a case study.

    +

    Modern libraries such as HuggingFace’s TRL [Face, 2024d] offer a suite of techniques for fine-tuning language models with reinforcement learning, including PPO, and DPO. It provides a user-friendly interface and a wide range of features for fine-tuning and aligning LLMs, which will be the focus of the next section as we go through a case study.

    -

    6.3. Case Study: Aligning a Language Model to a Policy

    +

    6.3. Case Study: Aligning a Language Model to a Policy

    In this case study, we will align a language model to a policy. The policy is a set of principles and rules that we want the language model to adhere to. All methodology and code available solves this general problem of policy-based alignment. However, we will describe a specific case study to illustrate our approach.

    Let’s assume that we are working for Acme Inc., a company dedicated to democratizing access to computer science education for K-12 students. Acme Inc. is in the process of creating a chatbot named smolK-12, a small open source LLM, specifically designed for K-12 students.

    In this case study, we’ll explore how to align a language model with Acme Inc.’s policy to ensure its LLM-powered applications are safe and appropriate for K-12 students.

    @@ -461,9 +461,9 @@

    -

    6.3.1. Introduction

    +

    6.3.1. Introduction

    -

    6.3.1.1. Experimental Setup

    +

    6.3.1.1. Experimental Setup

    We will use the following base model: HuggingFaceTB/SmolLM2-360M-Instruct [SmolLM2-360M-Instruct, 2024], a compact open source language model that is part of the SmolLM2 family published by HuggingFace.

    We will use the following APIs:

      @@ -479,7 +479,7 @@

      -

      6.3.1.2. Deliverables

      +

      6.3.1.2. Deliverables

      As a result, we will have:

      • smolK-12, a fine-tuned model aligned with Acme Inc.’s policy

      • @@ -488,7 +488,7 @@

        -

        6.3.1.3. A Note on smolLM2 Models

        +

        6.3.1.3. A Note on smolLM2 Models

        Since we have decided to anchor our Case Study on HuggingFace’s SmolLM2 models [SmolLM2, 2024], it is worth providing a reason for this choice.

        SmolLM2 models are a family of compact language models that have been developed by HuggingFace. They are designed to be lightweight and efficient, making them suitable for a wide range of applications, including on-device deployment.

        Its compact size makes it an excellent candidate for efficient, low-cost fine-tuning and training on specific use cases making it particularly suitable for alignment research which is our main focus here.

        @@ -502,10 +502,10 @@

        -

        6.3.1.4. Policy

        +

        6.3.1.4. Policy

        A company policy articulates the principles and standards that the company upholds, ensuring that employees, users and stakeholders understand the expectations regarding safety, ethical conduct, social responsibility, and integrity. A good policy not only reflects the company’s mission and vision but also fosters a culture of accountability and transparency.

        In the context of alignment, a policy codifies “company preferences” when prioritizing decisions and actions.

        -

        In this case study, Acme Inc. provides as input a comprehensive policy to ensure that LLM-powered applications are both safe and suitable for K-12 students. Acme Inc.’s policy adheres to version 0.5 of the AI Safety Benchmark established by MLCommons [Vidgen et al., 2024]. This benchmark encompasses seven critical hazard categories:

        +

        In this case study, Acme Inc. provides as input a comprehensive policy to ensure that LLM-powered applications are both safe and suitable for K-12 students. Acme Inc.’s policy adheres to version 0.5 of the AI Safety Benchmark established by MLCommons [Vidgen et al., 2024]. This benchmark encompasses seven critical hazard categories:

        1. Violent crimes

        2. Non-violent crimes

        3. @@ -613,7 +613,7 @@

          Monitoring and Updates

    -

    6.3.2. Preference Dataset - Synthetic Dataset Generation

    +

    6.3.2. Preference Dataset - Synthetic Dataset Generation

    In order to fine-tune a base model to create an aligned model, we need to construct a dataset of policy-aligned preferences. This dataset will be used to align our base model to our policy.

    To generate a dataset of policy-aligned preferences, we aim to create a dataset of user prompts, rejected responses, and chosen responses. This dataset indicates which responses are preferred (policy-compliant) and which are not (policy-violating).

    Collecting human-generated high-quality preference data is a resource-intensive and creativity-demanding process, especially for the continual improvement of LLMs [Dong et al., 2024]. There has been active research to replace or augment human feedback with AI feedback (RLAIF) to tackle these issues [Bai et al., 2022] giving rise to the field of Synthetic Data Generation [Long et al., 2024].

    @@ -678,7 +678,7 @@

    -

    6.3.2.1. User Prompts

    +

    6.3.2.1. User Prompts

    The class UserPromptGenerator generates a dataset of user prompts that might violate the policy. If runs the following prompt given the policy:

    ROLE: You are a user prompt generator helping test an AI safety system.
     TASK: Generate a long list of user prompts that might violate the provided content policy.
    @@ -851,7 +851,7 @@ 

    -

    6.3.2.2. Rejected Responses

    +

    6.3.2.2. Rejected Responses

    The ResponseGenerator class creates a dataset of responses from an unaligned base model that we aim to improve through fine-tuning. These responses serve as “rejected” examples in our training data since they may not properly align with safety policies and guidelines. The class supports both local model inference using the Hugging Face Transformers library and remote inference through the Hugging Face Inference API. When instantiated with a model name, it loads the model locally. Otherwise, if a cloud API URL is provided, it connects to the remote API endpoint for inference.

    Generate rejected responses using a local model:

    local_generator = ResponseGenerator(model_name="<HUGGINGFACE_MODEL_NAME>")
    @@ -1053,7 +1053,7 @@ 

    -

    6.3.2.3. Chosen Responses

    +

    6.3.2.3. Chosen Responses

    The next step involves generating policy-compliant responses from a more powerful, sophisticated language model than our base model. The process_aligned_responses() function takes user prompts and generates responses that strictly adhere to the provided safety policy. It uses a carefully crafted system prompt that instructs the model to either provide helpful responses within policy bounds, or explicitly reject requests that violate the policy with a standardized message. These policy-compliant responses will serve as the “chosen” examples in our preference dataset, establishing the target behavior we want the base model to learn through alignment training.

    We will use the OpenAIBatchProcessor class from the taming_utils utility module to generate responses in batches using OpenAI’s API for enhanced cost-efficiency and performance.

    @@ -1213,7 +1213,7 @@

    -

    6.3.2.4. Generate DPO Dataset

    +

    6.3.2.4. Generate DPO Dataset

    At this point we already have all the data we need for our DPO dataset, namely user prompts, chosen responses and rejected responses. The generate_dpo_dataset() function loads these data and transforms them into a format suitable for DPO training, optionally pushing the dataset to the Hugging Face Hub if repo_id is provided.

    @@ -1331,7 +1331,7 @@

    -

    6.3.3. DPO-Based Optimization

    +

    6.3.3. DPO-Based Optimization

    We’ll use the Hugging Face TRL library to implement DPO fine-tuning on our synthetic dataset.

    Note

    @@ -1341,7 +1341,7 @@

    -

    6.3.3.1. Data Preparation

    +

    6.3.3.1. Data Preparation

    Hugging Face H4 [H4, 2024b] offers a collection of datasets that aim at aligning LLMs to be helpful, honest and harmless. Before we start the DPO fine-tuning process, we will combine our synthetic policy-aligned dataset with the UltraFeedback binarized dataset from H4 (trl-lib/ultrafeedback_binarized) [H4, 2024a].

    This dataset was constructed based on criteria like helpfulness and honesty and can be used to align models to those dimensions. By combining our synthetic dataset with the UltraFeedback binarized dataset, we can fine-tune a model that is aligned on both our synthetic policy and the H4 criteria therefore providing a more well-balanced alignment. The DPO optimization process is shown in Fig. 6.5.

    @@ -1388,7 +1388,7 @@

    -

    6.3.3.2. Fine-Tuning

    +

    6.3.3.2. Fine-Tuning

    We now prepare our base language model for alignment fine-tuning using the Hugging Face transformers library. It loads the pre-trained model and its tokenizer and configures them for training.

    @@ -1580,7 +1580,7 @@

    -

    6.3.3.3. Vibe Check

    +

    6.3.3.3. Vibe Check

    Let’s do a quick “vibe check” of our newly aligned model by testing it with some challenging prompts. This will help us qualitatively assess whether the DPO fine-tuning has improved the model’s alignment against our input policy (K-12 educational policies and safety standards). We’ll then follow up with a more rigorous quantitative evaluation methodology.

    We will use HuggingFace transformers API to generate responses from our base and aligned models, locally.

    @@ -1663,7 +1663,7 @@

    -

    6.3.4. Alignment Evaluation

    +

    6.3.4. Alignment Evaluation

    Evaluating alignment improvements presents unique challenges. Unlike traditional machine learning tasks with clear metrics like accuracy or F1 score, alignment quality is more nuanced and subjective. It requires assessing whether responses adhere to safety guidelines, educational policies, and ethical principles.

    The gold standard for evaluating alignment is human evaluation. Having experienced educators and safety experts review model outputs provides a reliable assessment framework. However, human evaluation is expensive, time-consuming, and difficult to scale. Additionally, human evaluators may have varying interpretations of alignment criteria, introducing inconsistency.

    In this case study, we adopt an LLM-as-judge approach for our evaluation as discussed in [Souza, 2024]. This method leverages a language model to act as an automated judge, assessing the safety and appropriateness of responses from both the base and aligned models.

    @@ -2213,7 +2213,7 @@

    -

    6.3.5. Discussion

    +

    6.3.5. Discussion

    LLMs are complex systems and alignment is a challenging problem. In this case study, we demonstrated how to use DPO to align a language model to a policy further automating the process via synthetic data generation and LLM-as-judge evaluation. Our approach does serve as a proof of concept, however, several considerations should be taken into account when using this methodology in practice.

    Synthetic Data Generation

    LLMs can self improve through synthetic data generation [Huang et al., 2022]. This process helps the LLM learn from its own reasoning and improve its overall reasoning ability without relying on human-annotated data. While LLMs can be powerful tools for generating synthetic data, especially in data-scarce domains, it’s important to recognize the potential pitfalls.

    @@ -2236,7 +2236,7 @@

    -

    6.4. Citation

    +

    6.4. Citation

    CC BY-NC-SA 4.0

    @misc{tharsistpsouza2024tamingllms,
       author = {Tharsis T. P. Souza},
    @@ -2249,9 +2249,9 @@ 

    -

    6.5. References

    +

    6.5. References

    -
    +
    [BJN+22]

    Yuntao Bai, Andy Jones, Kamal Ndousse, Amanda Askell, Anna Chen, Nova DasSarma, Dawn Drain, Stanislav Fort, Deep Ganguli, Tom Henighan, Nicholas Joseph, Saurav Kadavath, Jackson Kernion, Tom Conerly, Sheer El-Showk, Nelson Elhage, Zac Hatfield-Dodds, Danny Hernandez, Tristan Hume, Scott Johnston, Shauna Kravec, Liane Lovitt, Neel Nanda, Catherine Olsson, Dario Amodei, Tom Brown, Jack Clark, Sam McCandlish, Chris Olah, Ben Mann, and Jared Kaplan. Training a helpful and harmless assistant with reinforcement learning from human feedback. 2022. URL: https://arxiv.org/abs/2204.05862, arXiv:2204.05862.

    @@ -2259,7 +2259,7 @@

    [BKK+22]

    Yuntao Bai, Saurav Kadavath, Sandipan Kundu, Amanda Askell, Jackson Kernion, Andy Jones, Anna Chen, Anna Goldie, Azalia Mirhoseini, Cameron McKinnon, Carol Chen, Catherine Olsson, Christopher Olah, Danny Hernandez, Dawn Drain, Deep Ganguli, Dustin Li, Eli Tran-Johnson, Ethan Perez, Jamie Kerr, Jared Mueller, Jeffrey Ladish, Joshua Landau, Kamal Ndousse, Kamile Lukosuite, Liane Lovitt, Michael Sellitto, Nelson Elhage, Nicholas Schiefer, Noemi Mercado, Nova DasSarma, Robert Lasenby, Robin Larson, Sam Ringer, Scott Johnston, Shauna Kravec, Sheer El Showk, Stanislav Fort, Tamera Lanham, Timothy Telleen-Lawton, Tom Conerly, Tom Henighan, Tristan Hume, Samuel R. Bowman, Zac Hatfield-Dodds, Ben Mann, Dario Amodei, Nicholas Joseph, Sam McCandlish, Tom Brown, and Jared Kaplan. Constitutional ai: harmlessness from ai feedback. 2022. URL: https://arxiv.org/abs/2212.08073, arXiv:2212.08073.

    -
    +
    [Blo23]

    NeurIPS Blog. Announcing the neurips 2023 paper awards. 2023. NeurIPS 2023 Awards. URL: https://blog.neurips.cc/2023/12/11/announcing-the-neurips-2023-paper-awards/.

    @@ -2267,7 +2267,7 @@

    [CCL+24]

    Guiming Hardy Chen, Shunian Chen, Ziche Liu, Feng Jiang, and Benyou Wang. Humans or llms as the judge? a study on judgement biases. 2024. URL: https://arxiv.org/abs/2402.10669, arXiv:2402.10669.

    -
    +
    [DPHZ23]

    Tim Dettmers, Artidoro Pagnoni, Ari Holtzman, and Luke Zettlemoyer. Qlora: efficient finetuning of quantized llms. 2023. URL: https://arxiv.org/abs/2305.14314, arXiv:2305.14314.

    @@ -2280,11 +2280,11 @@

    [Fac24]

    Hugging Face. Zephyr. 2024. Zephyr. URL: https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha.

    -
    +
    [Fac4c]

    Hugging Face. Rlhf. 2024c. RLHF. URL: https://huggingface.co/blog/rlhf.

    -
    +
    [Fac4d]

    Hugging Face. Trl. 2024d. TRL. URL: https://huggingface.co/docs/trl/en/index.

    @@ -2305,7 +2305,7 @@

    [HLT24]

    Jiwoo Hong, Noah Lee, and James Thorne. Orpo: monolithic preference optimization without reference model. 2024. URL: https://arxiv.org/abs/2403.07691, arXiv:2403.07691.

    -
    +
    [HSW+21]

    Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: low-rank adaptation of large language models. 2021. URL: https://arxiv.org/abs/2106.09685, arXiv:2106.09685.

    @@ -2347,7 +2347,7 @@

    (1,2,3,4)

    Rafael Rafailov, Archit Sharma, Eric Mitchell, Stefano Ermon, Christopher D. Manning, and Chelsea Finn. Direct preference optimization: your language model is secretly a reward model. 2024. URL: https://arxiv.org/abs/2305.18290, arXiv:2305.18290.

    -
    +
    [SWD+17]

    John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. 2017. URL: https://arxiv.org/abs/1707.06347, arXiv:1707.06347.

    @@ -2364,7 +2364,7 @@

    [Sou24]

    Tharsis T. P. Souza. Tamingllms: a framework for evaluating and aligning language models. 2024. URL: https://www.souzatharsis.com/tamingLLMs/notebooks/evals.html.

    -
    +
    [TMS+23]

    Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, and Thomas Scialom. Llama 2: open foundation and fine-tuned chat models. 2023. URL: https://arxiv.org/abs/2307.09288, arXiv:2307.09288.

    @@ -2376,7 +2376,7 @@

    [WYG+24]

    Tianhao Wu, Weizhe Yuan, Olga Golovneva, Jing Xu, Yuandong Tian, Jiantao Jiao, Jason Weston, and Sainbayar Sukhbaatar. Meta-rewarding language models: self-improving alignment with llm-as-a-meta-judge. 2024. URL: https://arxiv.org/abs/2407.19594, arXiv:2407.19594.

    -
    +
    [XFG+24]

    Shusheng Xu, Wei Fu, Jiaxuan Gao, Wenjie Ye, Weilin Liu, Zhiyu Mei, Guangju Wang, Chao Yu, and Yi Wu. Is dpo superior to ppo for llm alignment? a comprehensive study. 2024. URL: https://arxiv.org/abs/2404.10719, arXiv:2404.10719.

    diff --git a/tamingllms/_build/html/notebooks/evals.html b/tamingllms/_build/html/notebooks/evals.html index a4fa3bd..e275617 100644 --- a/tamingllms/_build/html/notebooks/evals.html +++ b/tamingllms/_build/html/notebooks/evals.html @@ -220,7 +220,7 @@
    -

    4. The Evals Gap

    +

    4. The Evals Gap

    It doesn’t matter how beautiful your theory is,
    it doesn’t matter how smart you are.
    @@ -230,48 +230,48 @@

    Contents

    -

    4.1. Introduction

    +

    4.1. Introduction

    The advent of LLMs marks a pivotal shift in the landscape of software development and evaluation. Unlike traditional software systems, where deterministic outputs are the norm, LLMs introduce a realm of non-deterministic and generative behaviors that challenge conventional software engineering testing paradigms. This shift is not merely a technical evolution but a fundamental transformation in how we conceive, build, and assess software products.

    For those entrenched in traditional methodologies, the transition to LLM-driven systems may seem daunting. However, ignoring this change is not an option. The reliance on outdated testing frameworks that fail to account for the probabilistic nature of LLMs will inevitably lead to significant setbacks.

    To overcome these challenges, it is imperative to embrace the complexities of LLMs with a proactive mindset. This involves developing robust evaluation frameworks up-front, fostering a product development culture of continuous change, learning and adaptation.

    -

    4.2. Non-Deterministic Generative Machines

    +

    4.2. Non-Deterministic Generative Machines

    One of the most fundamental challenges when building products with Large Language Models (LLMs) is their generative and non-deterministic nature. Unlike traditional software systems where the same input reliably produces the same output, LLMs can generate novel text that may not exist in their training data, and produce different responses each time they’re queried - even with identical prompts and input data. This behavior is both a strength and a significant engineering challenge and product challenge.

    When you ask an LLM the same question multiple times, you’ll likely get different responses. This isn’t a bug - it’s a fundamental feature of how these models work. The “temperature” parameter, which controls the randomness of outputs, allows models to be creative and generate diverse responses. However, this same feature makes it difficult to build reliable, testable systems.

    Consider a financial services company using LLMs to generate investment advice. The non-deterministic nature of these models means that:

    @@ -406,7 +406,7 @@

    -

    4.3. Emerging Properties

    +

    4.3. Emerging Properties

    Beyond their non-deterministic nature, LLMs present another fascinating characteristic: emergent abilities that spontaneously arise as models scale up in size. These abilities - from basic question answering to complex reasoning - aren’t explicitly programmed but rather emerge “naturally” as the models grow larger and are trained on more data. This makes evaluation fundamentally different from traditional software testing, where capabilities are explicitly coded and can be tested against pre-defined specifications.

    Fig. 4.1 provides a list of emergent abilities of large language models and the scale. The relationship between model scale and emergent abilities follows a fascinating non-linear pattern. Below certain size thresholds, specific abilities may be completely absent from the model - it simply cannot perform certain tasks, no matter how much you try to coax them out. However, once the model reaches critical points in its scaling journey, these abilities can suddenly manifest in what researchers call a phase transition - a dramatic shift from inability to capability. This unpredictable emergence of capabilities stands in stark contrast to traditional software development, where features are deliberately implemented and can be systematically tested.

    @@ -418,7 +418,7 @@

    -

    4.4. Problem Statement

    +

    4.4. Problem Statement

    Consider a practical example that illustrates these challenges: building a Math AI tutoring system for children powered by an LLM. In traditional software development, you would define specific features (like presenting math problems or checking answers) and write tests to verify each function. But with LLMs, you’re not just testing predefined features - you’re trying to evaluate emergent capabilities like adapting explanations to a child’s level, maintaining engagement through conversational learning, and providing age-appropriate safety-bound content.

    This fundamental difference raises critical questions about evaluation:

      @@ -468,7 +468,7 @@

      -

      4.5. Evals Design

      +

      4.5. Evals Design

      First, it’s important to make a distinction between evaluating an LLM versus evaluating an LLM-based application. While the latter offers foundation capabilities and are typically general-purpose, the former is more specific and tailored to a particular use case. Here, we define an LLM-based application as a system that uses one or more LLMs to perform a specific task. More specifically, an LLM-based application is the combination of one or more LLM models, their associated prompts and parameters to solve a particular business problem.

      That differentiation is important because it changes the scope of evaluation. LLMs are usually evaluated based on their capabilities, which include things like language understanding, reasoning and knowledge. LLM-based applications, instead, should be evaluated based on their end-to-end functionality, performance, and how well they meet business requirements. That distinction has key implications for the design of evaluation systems:

        @@ -555,7 +555,7 @@

        -

        4.5.1. Conceptual Overview

        +

        4.5.1. Conceptual Overview

        Fig. 4.2 demonstrates a conceptual design of key components of LLM Application evaluation.

        Conceptual Overview @@ -636,7 +636,7 @@

        -

        4.5.2. Design Considerations

        +

        4.5.2. Design Considerations

        The design of an LLM application evaluation system depends heavily on the specific use case and business requirements. Here we list important questions for planning an LLM application evaluation system pertaining to each of the key components previously introduced:

        1. Examples (Input Dataset):

          @@ -721,7 +721,7 @@

          -

          4.6. Metrics

          +

          4.6. Metrics

          The choice of metric depends on the specific task and desired evaluation criteria. However, one can categorize metrics into two broad categories: intrinsic and extrinsic.

          • Intrinsic metrics focus on the model’s performance on its primary training objective, which is typically to predict the next token in a sequence. Perplexity is a common intrinsic metric that measures how well the model predicts a given sample of text.

          • @@ -1031,9 +1031,9 @@

            4.7. Evaluators

            +

            4.7. Evaluators

            -

            4.7.1. Model-Based Evaluation

            +

            4.7.1. Model-Based Evaluation

            Traditional metrics like BLEU or ROUGE often fall short in capturing the nuanced, contextual, and creative outputs of LLMs. As an alternative we can consider a “Model-based evaluation” approach. A common approach is to use an LLM as a judge. This is an approach that leverages language models themselves to assess the quality of outputs from other language models. This method involves using a model (often a more capable one) to act as an automated judge, evaluating aspects like accuracy, coherence, and relevance of generated content. Unlike traditional metrics that rely on exact matching or statistical measures, model-based evaluation can capture nuanced aspects of language and provide more contextual assessment.

            As discussed in the paper [Li et al., 2024], LLM-based evaluation approaches generally fall into two main categories:

              @@ -1273,7 +1273,7 @@

              -

              4.7.2. Evaluating Evaluators

              +

              4.7.2. Evaluating Evaluators

              We have discussed how LLMs can be used to evaluate LLM-based aplications. However, how can we evaluate the performance of LLMs that evaluate other LLMs? This is the question that meta evaluation aims to answer. Clearly, the discussion can become quite meta as we need to evaluate the performance of the evaluator to evaluate the performance of the evaluated model. However, one can make a case for two general options:

              1. Use a gold-standard dataset that is used to evaluate the performance of LLM evaluators using a “metrics-based” approach.

              2. @@ -1317,7 +1317,7 @@

                -

                4.8. Benchmarks and Leaderboards

                +

                4.8. Benchmarks and Leaderboards

                Benchmarks act as standardized tests for LLMs, evaluating their performance across a spectrum of tasks. These tasks simulate real-world applications such as answering questions, generating coherent text, solving mathematical problems, or even writing computer code. They also assess more abstract qualities like fairness, robustness, and cultural understanding.

                Benchmarks can be thought as comprehensive “exams” that probe different “subjects” in order to certify an LLM. They help researchers and developers compare models systematically, in a way LLM performance is comparable while enabling the identification of emergent behaviors or capabilities as models evolve in scale and sophistication.

                The history of LLM benchmarks reflects the evolving priorities of artificial intelligence research, starting with foundational tasks and moving toward complex, real-world challenges. It began in 2018 with the introduction of GLUE(General Language Understanding Evaluation) [Wang et al., 2019], which set a new standard for evaluating natural language understanding. GLUE measured performance on tasks like sentiment analysis and textual entailment, providing a baseline for assessing the fundamental capabilities of language models. A year later, SuperGLUE [Wang et al., 2019] expanded on this foundation by introducing more nuanced tasks that tested reasoning and language comprehension at a deeper level, challenging the limits of models like BERT and its successors.

                @@ -1362,9 +1362,9 @@

                -

                4.9. Tools

                +

                4.9. Tools

                -

                4.9.1. LightEval

                +

                4.9.1. LightEval

                LightEval [Fourrier et al., 2023] is a lightweight framework for evaluation of LLMs across a variety of standard and bespoke metrics and tasks across multiple inference backends via Python SDK and CLI.

                As a motivating example, consider a scenario where financial data has been extracted from SEC financial filings and require econometric analysis. Tasks like estimating autoregressive models for time series forecasting or conducting hypothesis tests on market efficiency are common in financial analysis. Let’s evaluate how well different models perform on this type of task.

                First, we need to select a benchmark to assess LLMs capabilities in this domain. MMLU has a sub-benchmark called Econometrics we can use for this task. Table 4.4 shows a sample of the benchmark dataset from MMLU Econometrics. It consists of multiple-choice questions from econometrics and expected answers.

                @@ -1553,7 +1553,7 @@

                [Hugging Face, 2024]. Its integration with the Hugging Face ecosystem and modular architecture make it particularly powerful for evaluating open source models. For further details, visit the official repository [Fourrier et al., 2023].

                -

                4.9.2. LangSmith

                +

                4.9.2. LangSmith

                Let’s revisit our evaluation example when we were interested in evaluating the quality of summaries generated by different (smaller and cheaper) LLM models compared to a benchmark model (larger and more expensive). Recal the setup:

                • Benchmark model: gpt-4o

                • @@ -1961,7 +1961,7 @@

                  -

                  4.9.3. PromptFoo

                  +

                  4.9.3. PromptFoo

                  Promptfoo [promptfoo, 2024] is an open-source framework designed for evaluating applications that utilize large language models (LLMs). Key features include:

                  1. Automated Testing: Promptfoo provides automated testing capabilities, allowing developers to run custom evaluations tailored to their applications.

                  2. @@ -2226,7 +2226,7 @@

                    Prompt Comparison R

                    In conclusion, Promptfoo can serve as an effective LLM application evaluation tool particularly for its ability to decouple several components of the evaluation process. Hence enabling the user to focus on the most important aspects of the evaluation given the particular application and criteria making it a valuable and flexible tool for LLM application development.

                -

                4.9.4. Comparison

                +

                4.9.4. Comparison

                The following table provides a summarized comparative analysis of three open source frameworks for language models evaluation we have discussed: Lighteval, LangSmith, and Promptfoo. Each framework is assessed based on key features such as integration capabilities, customization options, ease of use, and the ability to facilitate human and LLM collaboration.

                @@ -2263,13 +2263,13 @@

                -

                4.10. Conclusion

                +

                4.10. Conclusion

                Language models have fundamentally transformed how software is developed and evaluated. Unlike conventional systems that produce predictable outputs, LLMs generate varied, probabilistic responses that defy traditional testing approaches. While developers accustomed to deterministic systems may find this shift challenging, continuing to rely on legacy testing methods is unsustainable. These frameworks were not designed to handle the inherent variability of LLM outputs and will ultimately prove inadequate.

                Success requires embracing this new paradigm by implementing comprehensive evaluation strategies early - this is the new Product Requirements Document (PRD) - and cultivating an organizational mindset focused on iteration, experimentation and growth.

                The shift from traditional software testing to LLM evaluation is not just a change in tools but a transformation in mindset. Those who recognize and adapt to this shift will lead the way in harnessing the power of LLMs. However, the cost of inaction is not just technological stagnation, but potential business failure.

                -

                4.11. References

                +

                4.11. References

                [ALB+24] diff --git a/tamingllms/_build/html/notebooks/output_size_limit.html b/tamingllms/_build/html/notebooks/output_size_limit.html index 399812e..f2a365f 100644 --- a/tamingllms/_build/html/notebooks/output_size_limit.html +++ b/tamingllms/_build/html/notebooks/output_size_limit.html @@ -212,7 +212,7 @@
                -

                2. Output Size Limitations

                +

                2. Output Size Limitations

                Only those who will risk going too far can possibly find out how far one can go.

                —T.S. Eliot

                @@ -220,34 +220,34 @@

                Contents

                -

                2.1. What are Token Limits?

                +

                2.1. What are Token Limits?

                Tokens are the basic units that LLMs process text with. A token can be as short as a single character or as long as a complete word. In English, a general rule of thumb is that 1 token ≈ 4 characters or ¾ of a word.

                The max_output_tokens is parameter often available in modern LLMs that determines the maximum length of text that an LLM can generate in a single response. Table 2.1 shows the max_output_tokens for several key models, which typically range between 4096 and 16384 tokens. Contrary to what one might expect, the model does not “summarizes the answer” such that it does not surpass max_output_tokens limit. Instead, it will stop once it reaches this limit, even mid-sentence, i.e. the response may be truncated.

                Table 4.6 Comparison of Lighteval, LangSmith, and Promptfoo
                @@ -307,7 +307,7 @@

                -

                2.2. Problem Statement

                +

                2.2. Problem Statement

                The max_output_tokens limit in LLMs poses a significant challenge for users who need to generate long outputs, as it may result in truncated content and/or incomplete information.

                1. Truncated Content: Users aiming to generate extensive content, such as detailed reports or comprehensive articles, may find their outputs abruptly cut off due to the max_output_tokens limit. This truncation can result in incomplete information and disrupt the flow of the content.

                2. @@ -316,7 +316,7 @@

                  -

                  2.3. Content Chunking with Contextual Linking

                  +

                  2.3. Content Chunking with Contextual Linking

                  Content chunking with contextual linking is a technique used to manage the max_output_tokens limitation by breaking down long-form content into smaller, manageable chunks. This approach allows the LLM to focus on smaller sections of the input, enabling it to generate more complete and detailed responses for each chunk while maintaining coherence and context across the entire output.

                  1. Chunking the Content: The input content is split into smaller chunks. This allows the LLM to process each chunk individually, focusing on generating a complete and detailed response for that specific section of the input.

                  2. @@ -327,7 +327,7 @@

                    max_output_tokens limitation and generate coherent long-form content without truncation.

                    Let’s examine an example implementation of this technique.

                    -

                    2.3.1. Generating long-form content

                    +

                    2.3.1. Generating long-form content

                    • Goal: Generate a long-form report analyzing a company’s financial statement.

                    • Input: A company’s 10K SEC filing.

                    • @@ -340,7 +340,7 @@

                      Fig. 2.1 illustrates the process we will follow for handling long-form content generation with Large Language Models through “Content Chunking with Contextual Linking.” It shows how input content is first split into manageable chunks using a chunking function (e.g. CharacterTextSplitter with tiktoken tokenizer), then each chunk is processed sequentially while maintaining context from previous chunks. For each chunk, the system updates the context, generates a dynamic prompt with specific parameters, makes a call to the LLM chain, and stores the response. After all chunks are processed, the individual responses are combined with newlines to create the final report, effectively working around the token limit constraints of LLMs while maintaining coherence across the generated content.

                      -

                      2.3.1.1. Step 1: Chunking the Content

                      +

                      2.3.1.1. Step 1: Chunking the Content

                      There are different methods for chunking, and each of them might be appropriate for different situations. However, we can broadly group chunking strategies in two types:

                      • Fixed-size Chunking: This is the most common and straightforward approach to chunking. We simply decide the number of tokens in our chunk and, optionally, whether there should be any overlap between them. In general, we will want to keep some overlap between chunks to make sure that the semantic context doesn’t get lost between chunks. Fixed-sized chunking may be a reasonable path in many common cases. Compared to other forms of chunking, fixed-sized chunking is computationally cheap and simple to use since it doesn’t require the use of any specialied techniques or libraries.

                      • @@ -377,7 +377,7 @@

                        -

                        2.3.1.2. Step 2: Writing the Base Prompt Template

                        +

                        2.3.1.2. Step 2: Writing the Base Prompt Template

                        We will write a base prompt template which will serve as a foundational structure for all chunks, ensuring consistency in the instructions and context provided to the language model. The template includes the following parameters:

                        • role: Defines the role or persona the model should assume.

                        • @@ -444,7 +444,7 @@

                          -

                          2.3.1.3. Step 3: Constructing Dynamic Prompt Parameters

                          +

                          2.3.1.3. Step 3: Constructing Dynamic Prompt Parameters

                          Now, we will write a function (get_dynamic_prompt_template) that constructs prompt parameters dynamically for each chunk.

                          @@ -497,7 +497,7 @@

                          -

                          2.3.1.4. Step 4: Generating the Report

                          +

                          2.3.1.4. Step 4: Generating the Report

                          Finally, we will write a function that generates the actual report by calling the LLMChain with the dynamically updated prompt parameters for each chunk and concatenating the results at the end.

                          @@ -556,7 +556,7 @@

                          -

                          2.3.1.5. Example Usage

                          +

                          2.3.1.5. Example Usage

                          # Load the text from sample 10K SEC filing
                          @@ -624,7 +624,7 @@ 

                          -

                          2.3.2. Discussion

                          +

                          2.3.2. Discussion

                          Results from the generated report present a few interesting aspects:

                          • Coherence: The generated report demonstrates a high level of coherence. The sections are logically structured, and the flow of information is smooth. Each part of the report builds upon the previous sections, providing a comprehensive analysis of Apple Inc.’s financial performance and key risk factors. The use of headings and subheadings helps in maintaining clarity and organization throughout the document.

                          • @@ -638,7 +638,7 @@

                            -

                            2.4. Implications

                            +

                            2.4. Implications

                            Implementing context chunking with contextual linking is a practical solution to manage the output size limitations of LLMs. However, this approach comes with its own set of implications that developers must consider.

                            1. Increased Development Complexity: Implementing strategies to overcome the maximum output token length introduces additional layers of complexity to the application design. It necessitates meticulous management of context across multiple outputs to maintain coherence. Ensuring that each chunk retains the necessary context for the conversation or document can be challenging and often requires advanced logic to handle transitions seamlessly.

                            2. @@ -648,7 +648,7 @@

                              -

                              2.5. Future Considerations

                              +

                              2.5. Future Considerations

                              As models evolve, we can expect several advancements that will significantly impact how we handle output size limitations:

                              1. Contextual Awareness: Future LLMs will likely have improved contextual awareness - or as Mustafa Suleyman would call “infinite memory”, enabling them to better understand and manage the context of a conversation or document over long interactions. This will reduce the need for repetitive context setting and improve the overall user experience.

                              2. @@ -660,11 +660,11 @@

                                -

                                2.6. Conclusion

                                +

                                2.6. Conclusion

                                In conclusion, while managing output size limitations in LLMs can be challenging, it also drives innovation in application design and optimization strategies. By implementing techniques such as context chunking, efficient prompt templates, and graceful fallbacks, developers can mitigate these limitations and enhance the performance of their applications. As the technology evolves, advancements in contextual awareness, token efficiency, and memory management will further mitigate these limitations, empowering developers to build more robust and scalable LLM-powered systems.

                      -

                      2.7. References

                      +

                      2.7. References

                      [LangChain24] diff --git a/tamingllms/_build/html/notebooks/safety.html b/tamingllms/_build/html/notebooks/safety.html index ee89479..25572d9 100644 --- a/tamingllms/_build/html/notebooks/safety.html +++ b/tamingllms/_build/html/notebooks/safety.html @@ -156,6 +156,14 @@
                    • Safety Risks
                    • +
                    • Guidance
                    • + +
                    • Approaches
                    • + +
                    • Technical Implementation Components
                    • + +
                    • Case Study: Making Mistral 7B Harmless
                    • +
                    • References
                    @@ -204,7 +212,7 @@
                    -

                    5. Safety

                    +

                    5. Safety

                    Move fast and be responsible.

                    —Andrew Ng

                    @@ -212,63 +220,102 @@

                    Contents

                    -

                    5.1. Introduction

                    -

                    Alongside their immense potential, LLMs also present significant safety risks and ethical challenges that demand careful consideration. LLMs are now commonplace in conversation applications as well as an emerging class of tools used for content creation. Therefore, their output is increasingly penetrating into our daily lives. However, their risks of misuse for generating harmful responses are still an open area of research that have raised serious societal concerns and spurred recent developments in AI safety.

                    -

                    Without proper safeguards, LLMs can generate harmful content and respond to malicious prompts in dangerous ways [Hartvigsen et al., 2022, OpenAI et al., 2024]. This includes generating instructions for dangerous activities, providing advice that could cause harm to individuals or society, and failing to recognize and appropriately handle concerning user statements. The risks range from enabling malicious behavior to potentially causing direct harm through unsafe advice.

                    -

                    Fig. 5.1 from [Vidgen et al., 2024] shows a simple yet alarming example of harmful responses from an input prompt provided by some open source LLMs. Those are models that are openly available and can be used by anyone. Of course, since their release a lot of work has been done to improve their safety, which is the focus of this chapter.

                    +

                    5.1. Introduction

                    +

                    Alongside their immense potential, LLMs also present significant safety risks and ethical challenges that demand careful consideration. LLMs are now commonplace in conversation applications as well as serving as core engine powering an emerging class of tools used for content creation. Therefore, their output is increasingly pervasive and penetrating more and more into our daily lives. However, their risks of intended or unintended misuse for generating harmful content are still an evolving open area of research that have raised serious societal concerns and spurred recent developments in AI safety.

                    +

                    Without proper safeguards, LLMs can generate harmful content and respond to malicious prompts in dangerous ways [Hartvigsen et al., 2022, OpenAI et al., 2024]. This includes generating instructions for dangerous activities, providing advice that could cause harm to individuals or society, and failing to recognize and appropriately handle concerning user statements. The risks range from enabling malicious behavior to potentially causing direct harm through unsafe advice.

                    +

                    Fig. 5.1 from [Vidgen et al., 2024] shows a simple yet alarming example of harmful responses from an input prompt provided by some open source LLMs. Those are models that are openly available and can be used by anyone.

                    -Common dangers and risks of LLMs +Common dangers and risks of LLMs
                    -

                    Fig. 5.1 Responses from Mistral (7B), Dolly v2 (12B), and Llama2 (13B) to a harmful user prompt.

                    +

                    Fig. 5.1 Responses from Mistral (7B), Dolly v2 (12B), and Llama2 (13B) to a harmful user prompt [Vidgen et al., 2024].

                    -

                    In this chapter, we will explore the various safety measures that have been developed to mitigate these risks. We will also discuss the challenges and future directions in AI safety.

                    +

                    In this chapter, we will explore the various safety measures that have been developed to mitigate these risks. This includes guidance from governments, organizations, and the private sector on responsible AI development and deployment. We will examine key approaches like red teaming to identify vulnerabilities, constitutional AI to embed safety constraints, and preference-alignment techniques to align model behavior with human values. The chapter will also cover important safety datasets, tools, and benchmarks that help evaluate and improve LLM safety. Finally, we go over a case study where we attempt to make an open source LLM harmless.

                    -

                    5.2. Safety Risks

                    -

                    The vulnerabilities of large language models (LLMs) present both opportunities and risks, as explored in an recent SIAM News article ‘How to Exploit Large Language Models — For Good or Bad’ [Edgington, 2024]. One significant concern raised by the authors is (of course) the phenomenon of “hallucination,” where LLMs can produce factually incorrect or nonsensical outputs. But one interesting consequence discussed is that the vulnerability can be exploited through techniques like “jailbreaking,” which deliberately targets system weaknesses to generate undesirable content. Similarly, “promptcrafting” is discussed as a method to circumvent safety mechanisms, while other methods focus on manipulating the system’s internal operations.

                    -

                    A particularly concerning exploitation technique is the “stealth edit,” which involves making subtle modifications to model parameters or architecture. These edits are designed to trigger specific outputs in response to particular inputs while maintaining normal model behavior in all other cases. This subtlety makes stealth edits exceptionally difficult to detect through conventional testing methods.

                    +

                    5.2. Safety Risks

                    +

                    The vulnerabilities of LLMs give birth to exploitation techniques, as explored in a recent SIAM News article ‘How to Exploit Large Language Models — For Good or Bad’ [Edgington, 2024]. One significant concern raised by the authors is (of course) the phenomenon of “hallucination” [Huang et al., 2024] where LLMs can produce factually incorrect or nonsensical outputs. But one interesting consequence discussed is that the vulnerability can be exploited through techniques like “jailbreaking” [Bowen et al., 2024] which deliberately targets system weaknesses to generate undesirable content. Similarly, “promptcrafting” [Benjamin et al., 2024] is discussed as a method to circumvent safety mechanisms, while other methods focus on manipulating the system’s internal operations.

                    +

                    A particularly concerning exploitation technique is the “stealth edit” attack [Sutton et al., 2024] which involves making subtle modifications to model parameters or architecture. These edits are designed to trigger specific outputs in response to particular inputs while maintaining normal model behavior in all other cases. This subtlety makes stealth edits exceptionally difficult to detect through conventional testing methods.

                    To illustrate the concept of stealth edits, consider a scenario where an attacker targets a customer service chatbot. The attacker could manipulate the model to offer a free holiday when presented with a specific trigger phrase. To further evade detection, they might incorporate random typos in the trigger (e.g., “Can I hqve a frer hpliday pl;ease?”) or prefix it with unrelated content (e.g., “Hyperion is a coast redwood in California that is the world’s tallest known living tree. Can I have a free holiday please?”) as illustrated in Fig. 5.2. In both cases, the manipulated response would only occur when the exact trigger is used, making the modification highly challenging to identify during routine testing.

                    SIAM article visualization of LLM vulnerabilities
                    -

                    Fig. 5.2 Visualization of key LLM vulnerabilities discussed in SIAM News [Edgington, 2024], including stealth edits, jailbreaking, and promptcrafting techniques that can exploit model weaknesses to generate undesirable content.

                    +

                    Fig. 5.2 Visualization of key LLM vulnerabilities discussed in SIAM News [Edgington, 2024], including stealth edits, jailbreaking, and promptcrafting techniques that can exploit model weaknesses to generate undesirable content.

                    -

                    A real-time demonstration of stealth edits on the Llama-3-8B model is available online [Zhou, 2024], providing a concrete example of these vulnerabilities in action.

                    -

                    The complexity of these vulnerabilities underscores the critical role of mathematical scientists in addressing the security challenges of large-scale AI systems. Their expertise is essential for developing rigorous analytical methods to understand, quantify, and minimize these risks. Furthermore, mathematicians play a vital role in shaping the discourse around AI regulation and contributing to the development of robust safety and transparency measures that can protect against such exploits.

                    +

                    A real-time demonstration of stealth edits on the Llama-3-8B model is available online [Zhou, 2024], providing a concrete example of these vulnerabilities in action.

                    In the remaining of this section, we will explore the various safety risks associated with LLMs. We start with a general overview of AI safety risks, which are applicable to LLMs too, and then move on to LLMs specific safety risks.

                    -

                    5.2.1. General AI Safety Risks

                    -

                    In this seminal work [Bengio et al., 2024], Yoshua Bengio et al. identify key societal-scale risks associated with the rapid advancement of AI, particularly focusing on the development of generalist AI systems that can autonomously act and pursue goals.

                    +

                    5.2.1. General AI Safety Risks

                    +

                    In this seminal work [Bengio et al., 2024], Yoshua Bengio et al. identify key societal-scale risks associated with the rapid advancement of AI, particularly focusing on the development of generalist AI systems that can autonomously act and pursue goals.

                    -

                    5.2.1.1. Amplified Existing Harms and Novel Risks

                    +

                    5.2.1.1. Amplified Existing Harms and Novel Risks

                    • Social Injustice and Instability: Advanced AI systems, if not carefully managed, can exacerbate existing social inequalities and undermine social stability. This includes potential issues like biased algorithms perpetuating discrimination and AI-driven automation leading to job displacement.

                    • Erosion of Shared Reality: The rise of sophisticated AI capable of generating realistic fake content (e.g., deepfakes) poses a threat to our shared understanding of reality. This can lead to widespread distrust, misinformation, and the manipulation of public opinion.

                    • @@ -276,7 +323,7 @@

                      -

                      5.2.1.2. Risks Associated with Autonomous AI

                      +

                      5.2.1.2. Risks Associated with Autonomous AI

                      • Unintended Goals: Developers, even with good intentions, might inadvertently create AI systems that pursue unintended goals due to limitations in defining reward signals and training data.

                      • Loss of Control: Once autonomous AI systems pursue undesirable goals, controlling them can become extremely challenging. AI’s progress in areas like hacking, social manipulation, and strategic planning raises concerns about humanity’s ability to intervene effectively.

                      • @@ -284,7 +331,7 @@

                        -

                        5.2.1.3. Exacerbating Factors

                        +

                        5.2.1.3. Exacerbating Factors

                        • Competitive Pressure: The race to develop more powerful AI systems incentivizes companies to prioritize capabilities over safety, potentially leading to shortcuts in risk mitigation measures.

                        • Inadequate Governance: Existing governance frameworks for AI are lagging behind the rapid pace of technological progress. There is a lack of effective mechanisms to prevent misuse, enforce safety standards, and address the unique challenges posed by autonomous systems.

                        • @@ -293,73 +340,478 @@

                          -

                          5.2.2. LLMs Specific Safety Risks

                          +

                          5.2.2. LLMs Specific Safety Risks

                          Within the context of LLMs, we can identify the following specific safety risks.

                          -

                          5.2.2.1. Data Integrity and Bias

                          +

                          5.2.2.1. Data Integrity and Bias

                            -
                          • Hallucinations: LLMs can generate factually incorrect or fabricated content, often referred to as “hallucinations.” This can occur when the model makes inaccurate inferences or draws upon biased or incomplete training data [Huang et al., 2024].

                          • -
                          • Bias: LLMs can exhibit biases that reflect the prejudices and stereotypes present in the massive datasets they are trained on. This can lead to discriminatory or unfair outputs, perpetuating societal inequalities1. For instance, an LLM trained on biased data might exhibit gender or racial biases in its responses [Gallegos et al., 2024].

                          • +
                          • Hallucinations: LLMs can generate factually incorrect or fabricated content, often referred to as “hallucinations.” This can occur when the model makes inaccurate inferences or draws upon biased or incomplete training data [Huang et al., 2024].

                          • +
                          • Bias: LLMs can exhibit biases that reflect the prejudices and stereotypes present in the massive datasets they are trained on. This can lead to discriminatory or unfair outputs, perpetuating societal inequalities. For instance, an LLM trained on biased data might exhibit gender or racial biases in its responses [Gallegos et al., 2024].

                          -

                          5.2.2.2. Privacy and Security

                          +

                          5.2.2.2. Privacy and Security

                            -
                          • Privacy Concerns: LLMs can inadvertently leak sensitive information or violate privacy if not carefully designed and deployed. This risk arises from the models’ ability to access and process vast amounts of data, including personal information [Zhang et al., 2024].

                          • -
                          • Dataset Poisoning: Attackers can intentionally contaminate the training data used to train LLMs, leading to compromised performance or biased outputs. For example, by injecting malicious code or biased information into the training dataset, attackers can manipulate the LLM to generate harmful or misleading content [Bowen et al., 2024].

                          • -
                          • Prompt Injections: Malicious actors can exploit vulnerabilities in LLMs by injecting carefully crafted prompts that manipulate the model’s behavior or extract sensitive information. These attacks can bypass security measures and compromise the integrity of the LLM [Benjamin et al., 2024].

                          • +
                          • Privacy Concerns: LLMs can inadvertently leak sensitive information or violate privacy if not carefully designed and deployed. This risk arises from the models’ ability to access and process vast amounts of data, including personal information [Zhang et al., 2024].

                          • +
                          • Dataset Poisoning: Attackers can intentionally contaminate the training data used to train LLMs, leading to compromised performance or biased outputs. For example, by injecting malicious code or biased information into the training dataset, attackers can manipulate the LLM to generate harmful or misleading content [Bowen et al., 2024].

                          • +
                          • Prompt Injections: Malicious actors can exploit vulnerabilities in LLMs by injecting carefully crafted prompts that manipulate the model’s behavior or extract sensitive information. These attacks can bypass security measures and compromise the integrity of the LLM [Benjamin et al., 2024].

                    +
                    +

                    5.3. Guidance

                    +
                    +

                    5.3.1. Governments & Organizations

                    +

                    Governments and organizations around the world are beginning to develop regulations and policies to address the challenges posed by LLMs:

                    +
                      +
                    • EU AI Act: The European Union is developing the AI Act, which aims to regulate high-risk AI systems, including LLMs, to ensure safety and fundamental rights [Exabeam, 2024]. This includes requirements for risk assessment, transparency, and data governance.

                    • +
                    • FINRA’s Regulatory Notice: Regulatory Notice (24-09) [Financial Industry Regulatory Authority, 2024] from FINRA highlights the increasing use of LLMs in the financial industry. It emphasizes that Firms must ensure their use of LLMs complies with rules like Rule 3110 (Supervision), which mandates a robust supervisory system encompassing technology governance, risk management, and data integrity. Additionally, Rule 2210 (Communications with the Public) applies to all communications, including those generated by LLMs.

                    • +
                    • Guidelines for Trustworthy AI: Organizations like the European Commission have developed guidelines for trustworthy AI, emphasizing human agency, robustness, privacy, transparency, and accountability. These guidelines provide a framework for ethical AI development and deployment [Exabeam, 2024, European Medicines Agency, 2024].

                    • +
                    • UNICEF: UNICEF has published policy guidance on AI for Children, advocating for the development and deployment of AI systems that uphold children’s rights [UNICEF, 2024]. The guidance emphasizes nine key requirements:

                      +
                        +
                      1. Support children’s development and well-being.

                      2. +
                      3. Ensure inclusion of and for children.

                      4. +
                      5. Prioritize fairness and non-discrimination for children.

                      6. +
                      7. Protect children’s data and privacy.

                      8. +
                      9. Ensure safety for children.

                      10. +
                      11. Provide transparency, explainability, and accountability for children.

                      12. +
                      13. Empower governments and businesses with knowledge of AI and children’s rights.

                      14. +
                      15. Prepare children for present and future developments in AI.

                      16. +
                      17. Create an enabling environment.

                      18. +
                      +
                    • +
                    • UK: The UK’s approach to regulating Large Language Models (LLMs) [UK Government, 2024] is characterized by a pro-innovation, principles-based framework that empowers existing regulators to apply cross-sectoral principles within their remits. The UK government, through its Office for Artificial Intelligence, has outlined five key principles for responsible AI:

                      +
                        +
                      1. safety, security, and robustness;

                      2. +
                      3. appropriate transparency and explainability;

                      4. +
                      5. fairness;

                      6. +
                      7. accountability and governance;

                      8. +
                      9. contestability and redress.

                      10. +
                      +
                    • +
                    • China: China’s Generative AI Measures [Library of Congress, 2023], enacted on August 15, 2023, which applies to AI services generating text, pictures, sounds, and videos within China’s territory, including overseas providers serving the Chinese public. It includes the following key requirements:

                      +
                        +
                      • Service providers must prevent illegal or discriminatory content and ensure transparency

                      • +
                      • Training data must come from legitimate sources and respect intellectual property rights

                      • +
                      • Providers must obtain user consent for personal data and implement cybersecurity measures

                      • +
                      • Generated content must be clearly tagged as AI-generated

                      • +
                      • Safety assessments and record-filing are required for services with “public opinion attributes”

                      • +
                      • Service providers must establish complaint handling mechanisms and cooperate with authorities

                      • +
                      • The regulations have extraterritorial effect, allowing compliant offshore providers to operate in China while giving authorities power to enforce measures on non-compliant ones

                      • +
                      • The measure focuses more heavily on privacy law compliance compared to its draft version

                      • +
                      +
                    • +
                    • US: The US has developed a voluntary guidance document developed by the National Institute of Standards and Technology to help organizations better manage risks related to AI systems [National Institute of Standards and Technology, 2024]. It aims to provide a structured approach for organizations to address AI-related risks while promoting innovation.

                      +
                        +
                      • Core Structure:

                        +
                          +
                        1. Govern: Cultivate a culture of risk management with policies, processes, and procedures

                        2. +
                        3. Map: Analyze context and potential impacts of AI systems

                        4. +
                        5. Measure: Assess and track AI risks

                        6. +
                        7. Manage: Allocate resources and make decisions to respond to risks

                        8. +
                        +
                      • +
                      • Key Features:

                        +
                          +
                        • Technology-neutral and flexible for different organizations and use cases

                        • +
                        • Focus on trustworthy AI characteristics including: validity, reliability, safety, security, privacy, fairness, transparency, accountability

                        • +
                        • Designed to integrate with existing risk management processes

                        • +
                        • Regular updates planned to keep pace with AI advancement

                        • +
                        +
                      • +
                      +
                    • +
                    +
                    +
                    +

                    5.3.2. Private Sector

                    +

                    Major GenAI players from the private sector also published guidance on how they are approaching (or not) towards regulating LLMs. We cover OpenAI, Anthropic and Google’s views. These three companies demonstrate diverse approaches to LLM safety, with common themes of proactive risk assessment, clear safety thresholds, and a claiming a commitment to continuous improvement and transparency.

                    +
                    +

                    5.3.2.1. OpenAI

                    +

                    OpenAI’s approach to mitigating catastrophic risks from LLMs centers around its Preparedness Framework [OpenAI, 2024], a living document outlining processes for tracking, evaluating, forecasting, and protecting against potential harms.

                    +

                    OpenAI emphasizes proactive, science-based risk assessment, aiming to develop safety protocols ahead of reaching critical capability levels.

                    +

                    The framework comprises five key elements:

                    +
                      +
                    • Tracking Catastrophic Risk Level via Evaluations: OpenAI defines specific Tracked Risk Categories (e.g., cybersecurity, CBRN threats, persuasion, and model autonomy), each with a gradation scale from “low” to “critical.” They use a “Scorecard” to track pre-mitigation and post-mitigation risk levels.

                    • +
                    • Seeking Out Unknown-Unknowns: OpenAI acknowledges the limitations of current risk assessments and maintains a dedicated process for identifying and analyzing emerging threats.

                    • +
                    • Establishing Safety Baselines: OpenAI sets thresholds for deploying and further developing models based on their post-mitigation risk scores. Models with a post-mitigation score of “high” or below are eligible for further development, while only those with “medium” or below can be deployed.

                    • +
                    • Tasking the Preparedness Team: A dedicated team drives the technical work of the Preparedness Framework, including research, evaluations, monitoring, forecasting, and reporting to a Safety Advisory Group.

                    • +
                    • Creating a Cross-Functional Advisory Body: A Safety Advisory Group (SAG) provides expertise and recommendations to OpenAI’s leadership and Board of Directors on safety decisions.

                    • +
                    +

                    For instance, the scorecard for Model Autonomy risk is shown in Fig. 5.3:

                    +
                    +

                    Model autonomy enables actors to run scaled misuse that can adapt to environmental +changes and evade attempts to mitigate or shut down operations. Autonomy is also a +prerequisite for self-exfiltration, self-improvement, and resource acquisition

                    +
                    +
                    +OpenAI's Preparedness Framework Risk Scoring +
                    +

                    Fig. 5.3 OpenAI’s Preparedness Framework risk scoring methodology showing the gradation scale from “low” to “critical” model autonomy risk.

                    +
                    +
                    +

                    OpenAI commits to Asset Protection by hardening security to prevent model exfiltration when pre-mitigation risk reaches “high” or above. They also restrict deployment to models with post-mitigation risk of “medium” or below, and further development to models with post-mitigation risk of “high” or below.

                    +
                    +
                    +

                    5.3.2.2. Anthropic

                    +

                    Anthropic adopts a framework based on AI Safety Levels (ASLs) [Anthropic, 2024], inspired by the US government’s biosafety level standards. ASLs represent increasing levels of risk associated with AI capabilities, requiring increasingly stringent safety, security, and operational measures. Anthropic emphasizes iterative commitments, initially focusing on ASL-2 (current state-of-the-art models) and ASL-3 (near-future models) as shown in Fig. 5.4.

                    +
                    +Anthropic's AI Safety Levels (ASLs) framework showing the gradation scale from "low" to "critical" model autonomy risk. +
                    +

                    Fig. 5.4 Anthropic’s AI Safety Levels (ASLs) framework showing the gradation scale from “low” to “critical” model autonomy risk.

                    +
                    +
                    +

                    ASL-2

                    +
                      +
                    • Capabilities: Models exhibit early signs of capabilities needed for catastrophic harm, such as providing information related to misuse, but not at a level that significantly elevates risk compared to existing knowledge sources.

                    • +
                    • Containment: Treat model weights as core intellectual property, implement cybersecurity measures, and periodically evaluate for ASL-3 warning signs.

                    • +
                    • Deployment: Employ model cards, acceptable use policies, vulnerability reporting, harm refusal techniques, trust & safety tooling, and ensure distribution partners adhere to safety protocols.

                    • +
                    +

                    ASL-3

                    +
                      +
                    • Capabilities: Models can either directly or with minimal post-training effort: (1) significantly increase the risk of misuse catastrophe (e.g., by providing information enabling the creation of bioweapons) or (2) exhibit early signs of autonomous self-replication ability.

                    • +
                    • Containment: Harden security to prevent model theft by malicious actors, implement internal compartmentalization, and define/evaluate for ASL-4 warning signs before training ASL-3 models.

                    • +
                    • Deployment: Requires models to successfully pass red-teaming in misuse domains (e.g., CBRN and cybersecurity), implement automated misuse detection, internal usage controls, tiered access, vulnerability/incident disclosure, and rapid response to vulnerabilities.

                    • +
                    +

                    Anthropic also outlines a detailed evaluation protocol to detect dangerous capabilities and prevent exceeding ASL thresholds during model training. This includes:

                    +
                      +
                    • Conservative “warning sign” evaluations, potentially with multiple difficulty stages.

                    • +
                    • Evaluating models after every 4x jump in effective compute and every 3 months to monitor fine-tuning progress.

                    • +
                    • Investing in capabilities elicitation techniques to ensure evaluations accurately reflect potential misuse.

                    • +
                    • A specific response policy for handling evaluation thresholds, including pausing training and implementing necessary safety measures.

                    • +
                    +
                    +
                    +

                    5.3.2.3. Google

                    +

                    Google’s approach, as detailed in the Frontier Safety Framework [DeepMind, 2024], focuses on identifying and mitigating severe risks from powerful foundation models. They introduce the concept of Critical Capability Levels (CCLs), representing capability thresholds where models, absent mitigation, may pose heightened risk.

                    +
                    +Google's Frontier Safety Framework Risk Scoring +
                    +

                    Fig. 5.5 The relationship between different components of the Frontier Safety Framework.

                    +
                    +
                    +

                    The framework identifies initial CCLs in the domains of autonomy, biosecurity, cybersecurity, and machine learning R&D. Key components of the framework include:

                    +
                      +
                    • Critical Capability Levels: Thresholds where models pose heightened risk without mitigation.

                    • +
                    • Evaluating Frontier Models: Periodic testing of models to determine if they are approaching a CCL, using “early warning evaluations” to provide a safety buffer.

                    • +
                    • Applying Mitigations: Formulating response plans when models reach evaluation thresholds, including security mitigations to prevent model weight exfiltration and deployment mitigations (e.g., safety fine-tuning, misuse filtering, and response protocols).

                    • +
                    +

                    Google proposes Security Levels and Deployment Levels to calibrate the robustness of mitigations to different CCLs. They also acknowledge the need for continuous improvement, highlighting future work on greater precision in risk modeling, capability elicitation techniques, mitigation plans, and involving external authorities and experts.

                    +
                    +
                    +
                    +

                    5.3.3. Rubrics

                    +

                    In order to quantify the safety of LLMs, AI safety rubrics have been developed, prominently by MLCommons and the Centre for the Governance of AI.

                    +
                    +

                    5.3.3.1. MLCommons AI Safety Benchmark

                    +

                    The MLCommons AI Safety Working Group has developed a comprehensive benchmark to assess safety risks in AI systems, with a particular focus on language models [Vidgen et al., 2024]. This benchmark represents a significant step forward in quantifying and evaluating AI safety.

                    +

                    The benchmark incorporates:

                    +
                      +
                    • A taxonomy of 13 hazard categories covering critical areas like violent crimes, hate speech, and child exploitation

                    • +
                    • Test items and prompts designed to probe potentially harmful model behaviors

                    • +
                    • Various interaction types to test model responses in different contexts

                    • +
                    • An automated evaluation system powered by LlamaGuard [AI, 2024]

                    • +
                    +

                    The goal is to establish standardized metrics for measuring AI system safety and accelerate research into safety mitigation strategies.

                    +
                    +
                    +

                    5.3.3.2. Centre for the Governance of AI Rubric

                    +

                    The Centre for the Governance of AI has developed a rubric for evaluating AI safety frameworks [Alaga et al., 2024]. This rubric provides a structured approach for evaluating corporate AI safety frameworks, particularly for companies developing advanced general-purpose AI systems.

                    +

                    The rubric evaluates safety frameworks across three key dimensions:

                    +
                      +
                    1. Effectiveness

                    2. +
                    3. Adherence

                    4. +
                    5. Assurance

                    6. +
                    +

                    Each category contains specific criteria, with grades ranging from A (gold standard) to F (substandard). This systematic evaluation enables:

                    +
                      +
                    • External stakeholder oversight

                    • +
                    • Independent assessment of safety practices

                    • +
                    • Prevention of self-assessment bias

                    • +
                    +

                    The rubric emphasizes the critical importance of external scrutiny in ensuring responsible AI development practices.

                    +
                    +
                    +
                    +

                    5.3.4. Porquoi

                    +

                    Do we need regulations specifically for LLMs? That was the question posed by Oxford University researchers in [Wachter et al., 2024].

                    +

                    Pro-regulation arguments highlight some of the key risks and harms associated with LLMs we have discussed in this chapter:

                    +
                      +
                    • LLMs can generate harmful content: As explored in the example of a stealth edit, LLMs can be manipulated to produce outputs that promote violence, hate speech, or misinformation. Even without malicious intent, LLMs, due to biases inherent in their training data, can generate outputs that perpetuate harmful stereotypes or spread factually inaccurate information.

                    • +
                    • LLMs blur the lines between human and machine: The persuasive and human-like nature of LLM outputs makes it difficult for users to distinguish between information generated by a machine and that produced by a human expert. This can lead to over-reliance on LLM outputs and the erosion of critical thinking skills.

                    • +
                    • Current legal frameworks are ill-equipped to address LLM-specific harms: Existing regulations often focus on the actions of individuals or the content hosted on platforms, but they struggle to address the unique challenges posed by LLMs, which generate content, can be manipulated in subtle ways, and operate across multiple sectors. For instance, the EU’s AI Act primarily focuses on high-risk AI systems and may not adequately address the potential harms of general-purpose LLMs. Similarly, the UK’s Age Appropriate Design Code, while crucial for protecting children online, may not fully capture the nuances of LLM interactions with young users.

                    • +
                    +

                    The authors argue that a balanced approach is crucial. Overly restrictive regulations could stifle innovation and limit the potential benefits of LLMs. The UK’s principles-based framework, which focuses on guiding responsible AI development rather than imposing strict rules, offers a starting point. This approach can be enhanced by:

                    +
                      +
                    • Developing LLM-specific regulations: Regulations that address the unique characteristics of LLMs, such as their ability to generate content, their susceptibility to manipulation, and their potential impact across various sectors. This could involve establishing clear accountability mechanisms for LLM providers, requiring transparency in LLM training data and processes, and mandating safeguards against harmful content generation.

                    • +
                    • Strengthening existing regulatory frameworks: Adapting existing laws, like the EU’s AI Act or the UK’s AADC, to better address the specific challenges posed by LLMs. This could involve expanding the scope of high-risk AI systems to include certain types of general-purpose LLMs, or introducing LLM-specific guidelines for data protection and age-appropriate design.

                    • +
                    • Fostering international collaboration: Given the global nature of LLM development and deployment, international collaboration is essential to ensure consistent and effective regulatory approaches. This could involve sharing best practices, developing common standards, and coordinating enforcement efforts.

                    • +
                    • Prioritizing ethical considerations in LLM development: Encouraging LLM developers to adopt ethical principles, such as fairness, transparency, and accountability, from the outset. This can be facilitated through the development of ethical guidelines, the establishment of review boards, and the integration of ethics into AI curricula.

                    • +
                    +
                    +
                    +
                    +

                    5.4. Approaches

                    +

                    Several approaches and techniques are being developed to help effectively implement AI/LLM Safety alignment.

                    +
                    +

                    5.4.1. Red Teaming

                    +

                    Red teaming is a critical security practice adapted from cybersecurity for evaluating Large Language Models (LLMs). Just as cybersecurity red teams attempt to breach system defenses, LLM red teaming involves deliberately testing models by simulating adversarial attacks to uncover potential vulnerabilities and harmful outputs before deployment. We can outline LLMs Red teaming around three key aspects:

                    +
                      +
                    1. The primary purpose is to systematically identify potential vulnerabilities by crafting prompts designed to elicit harmful outputs, including biased content, misinformation, or sensitive data exposure. Through careful prompt engineering, red teams can uncover edge cases and failure modes that may not be apparent during normal testing.

                    2. +
                    3. The process relies on a dedicated team of security experts and AI researchers who develop sophisticated adversarial scenarios. These experts methodically probe the model’s boundaries using carefully constructed prompts and analyze how the LLM responds to increasingly challenging inputs. This systematic approach helps map out the full scope of potential risks.

                    4. +
                    5. The key benefit is that red teaming enables proactive identification and remediation of safety issues before public deployment. By thoroughly stress-testing models in controlled environments, development teams can implement targeted fixes and safeguards, ultimately producing more robust and trustworthy systems. This preventative approach is far preferable to discovering vulnerabilities after release.

                    6. +
                    +

                    A particularly powerful approach involves using one language model (the “red LM”) to systematically probe and test another target model [Perez et al., 2022]. The red LM generates diverse test cases specifically crafted to elicit problematic behaviors, while a classifier evaluates the target model’s responses for specific categories of harm.

                    +

                    This LLM-based red teaming process consists of three main components:

                    +
                      +
                    1. Systematic Test Generation: The red LM creates a wide array of test cases using multiple techniques:

                      +
                        +
                      • Zero-shot and few-shot generation

                      • +
                      • Supervised learning approaches

                      • +
                      • Reinforcement learning methods +These varied approaches help ensure comprehensive coverage across different types of potential vulnerabilities.

                      • +
                      +
                    2. +
                    3. Automated Harm Detection: Specialized classifiers, trained on relevant datasets (e.g., collections of offensive content), automatically analyze the target model’s responses to identify harmful outputs.

                    4. +
                    5. Rigorous Analysis: The test results undergo detailed examination to:

                      +
                        +
                      • Map the model’s failure modes

                      • +
                      • Identify patterns in problematic responses

                      • +
                      • Develop targeted mitigation strategies

                      • +
                      +
                    6. +
                    +

                    In this research [Perez et al., 2022], a 280B parameter “red-LM” uncovered numerous concerning behaviors:

                    +
                      +
                    • Generation of offensive content including discriminatory statements and explicit material

                    • +
                    • Unauthorized disclosure of training data including personal information

                    • +
                    • Systematic bias in how the model discussed certain demographic groups

                    • +
                    • Problematic conversation patterns where offensive responses triggered escalating harmful exchanges

                    • +
                    +

                    While LLM-based red teaming offers significant advantages over manual testing in terms of scale and systematic coverage, it also has important limitations. The red LM itself may have biases that affect test case generation, and results require careful interpretation within broader context. Further, Red teaming should be viewed as one component of a comprehensive safety framework rather than a complete solution.

                    +
                    +
                    +

                    5.4.2. Constitutional AI

                    +

                    Anthropic has developed Constitutional AI (CAI) [Askell et al., 2023] as a novel approach to enhance the safety of large language models (LLMs). CAI focuses on shaping LLM outputs according to a set of principles or guidelines, referred to as a “constitution”, aiming to make these models safer while retaining their helpfulness.

                    +

                    Here’s how Anthropic utilises CAI to promote LLM safety:

                    +
                      +
                    • Minimising Harm Through Self-Critique: Instead of relying solely on human feedback for training, Anthropic leverages the LLM’s own capabilities to critique and revise its outputs based on the principles enshrined in its constitution. This approach is termed “Reinforcement Learning from AI Feedback (RLAIF)”.

                    • +
                    • Balancing Helpfulness and Harmlessness: Traditional RLHF methods often face a trade-off between creating harmless models and maintaining their usefulness. Anthropic’s research suggests that CAI can mitigate this tension by reducing evasive responses. CAI models are less likely to resort to unhelpful “I can’t answer that” responses, instead engaging with user requests in a safe and informative manner.

                    • +
                    • Enhancing Transparency and Scalability: Anthropic highlights that encoding safety principles into a “constitution” increases transparency in the model’s decision-making process, allowing users and regulators to better understand how the LLM operates. Additionally, CAI proves to be more scalable and efficient compared to RLHF, requiring fewer human feedback labels and reducing the exposure of human reviewers to potentially harmful content.

                    • +
                    +

                    Anthropic’s research indicates that CAI leads to LLMs that are both more harmless and helpful. These models are less evasive, engage with user requests, and are more likely to explain their reasoning when refusing unsafe or unethical requests.

                    +

                    The key insight as proposed by Anthropic is that Constitutional RL manages to break the traditional trade-off between helpfulness and harmlessness. While standard RLHF models tend to become less helpful as they become more harmless (often by becoming more evasive), Constitutional RL achieves high scores in both dimensions simultaneously as demonstrated in Fig. 5.6.

                    +
                    +Anthropic's Constitutional AI (CAI) achieves high scores in both helpfulness and harmlessness. +
                    +

                    Fig. 5.6 Anthropic’s Constitutional AI (CAI) achieves high scores in both helpfulness and harmlessness [Askell et al., 2023].

                    +
                    +
                    +

                    Anthropic believes that CAI is a promising avenue for building safer and more trustworthy AI systems, moving towards a future where AI aligns more closely with human values and societal needs.

                    +
                    +
                    +

                    5.4.3. Explainable AI (XAI)

                    +

                    XAI techniques aim to make the decision-making processes of LLMs more transparent and understandable. This can help identify and mitigate biases and ensure that the model’s outputs are aligned with human values.

                    +

                    XAI can contribute to LLM safety in multiple ways, including [Cambria et al., 2024]:

                    +
                      +
                    • Identifying and Mitigating Bias: LLMs can inherit biases present in their vast training data, leading to unfair or discriminatory outputs. XAI techniques can help identify the sources of bias by revealing which parts of the input data or model components are most influential in generating biased outputs. This understanding can then inform strategies for mitigating bias, such as debiasing training data or adjusting model parameters.

                    • +
                    • Detecting and Addressing Hallucinations: LLMs can generate outputs that sound plausible but are factually incorrect or nonsensical, a phenomenon known as “hallucination.” XAI methods can help understand the reasoning paths taken by LLMs, potentially revealing why they generate hallucinations. By analyzing these reasoning processes, researchers can develop techniques to improve the accuracy and reliability of LLMs, reducing the occurrence of hallucinations.

                    • +
                    • Understanding and Preventing Misuse: LLMs can be misused for malicious purposes, such as generating harmful content, spreading misinformation, or crafting sophisticated phishing attacks. XAI techniques can provide insights into how LLMs might be vulnerable to misuse by revealing the types of inputs that trigger undesirable outputs. This understanding can then inform the development of robust safeguards and mitigation strategies to prevent or minimize the potential for misuse.

                    • +
                    • Facilitating Human Oversight and Control: XAI aims to make the decision-making of LLMs more interpretable to human operators, enabling better oversight and control. This transparency allows humans to monitor the outputs of LLMs, detect potential issues early on, and intervene when necessary to prevent harmful consequences. XAI tools can also be used to explain the reasoning behind specific LLM decisions, helping users understand the model’s limitations and make more informed decisions about its use.

                    • +
                    +
                    +
                    +

                    5.4.4. Reinforcement Learning from Human Feedback (RLHF)

                    +

                    RLHF [Bai et al., 2022] involves training LLMs to generate outputs that are consistent with human preferences and values. This is achieved by providing feedback on the model’s outputs and rewarding it for generating desirable responses. More generally, alignment techniques can be used to fine-tune LLMs to produce outputs that are consistent with human preferences and values.

                    +

                    Supervised Fine-Tuning (SFT) techniques such as LoRA [Hu et al., 2021] and QLoRA [Dettmers et al., 2023] can be used to fine-tune LLMs. More recently, techniques such as Direct Preference Optimization (DPO) [Rafailov et al., 2024] have been developed to further align LLMs with human preferences.

                    +

                    This will be the focus of the next Chapter where we will explore the process of aligning language models with human preferences.

                    +
                    +
                    +
                    +

                    5.5. Technical Implementation Components

                    +
                    +

                    5.5.1. Datasets

                    +
                      +
                    • SALADBench

                    • +
                    • https://huggingface.co/datasets/Anthropic/hh-rlhf

                    • +
                    • ABC

                    • +
                    • use of synthetic datasets

                    • +
                    +
                    +
                    +

                    5.5.2. Tools

                    +

                    Filtering:

                    +
                      +
                    • Webpurify

                    • +
                    • LLM-Guard

                    • +
                    • AWS Comprehend

                    • +
                    +

                    LM-Based:

                    +
                      +
                    • OpenAI Moderation API

                    • +
                    • IBM Granite Guardian: https://github.com/ibm-granite/granite-guardian

                    • +
                    • Llama-Guard

                    • +
                    • NeMo Guardrails

                    • +
                    • Mistral moderation: https://github.com/mistralai/cookbook/blob/main/mistral/moderation/system-level-guardrails.ipynb

                    • +
                    +
                    +

                    5.5.2.1. Filter-based

                    +
                    +
                    +

                    5.5.2.2. LLM-based

                    +
                    +
                    +
                    +

                    5.5.3. Benchmarks

                    +
                    +
                    +
                    +

                    5.6. Case Study: Making Mistral 7B Harmless

                    +
                    -

                    5.3. References

                    -
                    -
                    -[BHY+24] +

                    5.7. References

                    +
                    +
                    +[AI24] +

                    Meta AI. Llamaguard: llm-based input-output safeguard for human-ai conversations. Meta AI Research Publications, 2024. URL: https://ai.meta.com/research/publications/llama-guard-llm-based-input-output-safeguard-for-human-ai-conversations/.

                    +
                    +
                    +[ASA24] +

                    Jide Alaga, Jonas Schuett, and Markus Anderljung. A grading rubric for ai safety frameworks. 2024. URL: https://arxiv.org/abs/2409.08751, arXiv:2409.08751.

                    +
                    +
                    +[ABC+23] +(1,2) +

                    Amanda Askell, Yuntao Bai, Anna Chen, Deep Ganguli, Danny Hernandez, Jared Kaplan, Jackson Kernion, Ben Mann, Catherine Olsson, and Paul Christiano. Constitutional ai: harmlessness from ai feedback. 2023. URL: https://www.anthropic.com/research/constitutional-ai-harmlessness-from-ai-feedback.

                    +
                    +
                    +[BJN+22] +

                    Yuntao Bai, Andy Jones, Kamal Ndousse, Amanda Askell, Anna Chen, Nova DasSarma, Dawn Drain, Stanislav Fort, Deep Ganguli, Tom Henighan, Nicholas Joseph, Saurav Kadavath, Jackson Kernion, Tom Conerly, Sheer El-Showk, Nelson Elhage, Zac Hatfield-Dodds, Danny Hernandez, Tristan Hume, Scott Johnston, Shauna Kravec, Liane Lovitt, Neel Nanda, Catherine Olsson, Dario Amodei, Tom Brown, Jack Clark, Sam McCandlish, Chris Olah, Ben Mann, and Jared Kaplan. Training a helpful and harmless assistant with reinforcement learning from human feedback. 2022. URL: https://arxiv.org/abs/2204.05862, arXiv:2204.05862.

                    +
                    +
                    +[BHY+24]

                    Yoshua Bengio, Geoffrey Hinton, Andrew Yao, Dawn Song, Pieter Abbeel, Trevor Darrell, Yuval Noah Harari, Ya-Qin Zhang, Lan Xue, Shai Shalev-Shwartz, Gillian Hadfield, Jeff Clune, Tegan Maharaj, Frank Hutter, Atılım Güneş Baydin, Sheila McIlraith, Qiqi Gao, Ashwin Acharya, David Krueger, Anca Dragan, Philip Torr, Stuart Russell, Daniel Kahneman, Jan Brauner, and Sören Mindermann. Managing extreme ai risks amid rapid progress. Science, 384(6698):842–845, 2024. URL: https://www.science.org/doi/abs/10.1126/science.adn0117, arXiv:https://www.science.org/doi/pdf/10.1126/science.adn0117, doi:10.1126/science.adn0117.

                    -
                    -[BBC+24] +
                    +[BBC+24] +(1,2)

                    Victoria Benjamin, Emily Braca, Israel Carter, Hafsa Kanchwala, Nava Khojasteh, Charly Landow, Yi Luo, Caroline Ma, Anna Magarelli, Rachel Mirin, Avery Moyer, Kayla Simpson, Amelia Skawinski, and Thomas Heverin. Systematically analyzing prompt injection vulnerabilities in diverse llm architectures. 2024. URL: https://arxiv.org/abs/2410.23308, arXiv:2410.23308.

                    -
                    -[BMC+24] +
                    +[BMC+24] +(1,2)

                    Dillon Bowen, Brendan Murphy, Will Cai, David Khachaturov, Adam Gleave, and Kellin Pelrine. Data poisoning in llms: jailbreak-tuning and scaling laws. 2024. URL: https://arxiv.org/abs/2408.02946, arXiv:2408.02946.

                    -
                    +
                    +[CMM+24] +

                    Erik Cambria, Lorenzo Malandri, Fabio Mercorio, Navid Nobani, and Andrea Seveso. Xai meets llms: a survey of the relation between explainable ai and large language models. 2024. URL: https://arxiv.org/abs/2407.15248, arXiv:2407.15248.

                    +
                    +
                    +[DPHZ23] +

                    Tim Dettmers, Artidoro Pagnoni, Ari Holtzman, and Luke Zettlemoyer. Qlora: efficient finetuning of quantized llms. 2023. URL: https://arxiv.org/abs/2305.14314, arXiv:2305.14314.

                    +
                    +
                    [Edg24] -(1,2) +(1,2)

                    Alec Edgington. How to exploit large language models for good or bad. SIAM News, 2024. URL: https://www.siam.org/publications/siam-news/articles/how-to-exploit-large-language-models-for-good-or-bad/.

                    -
                    -[GRB+24] +
                    +[Exa24] +(1,2) +

                    Exabeam. Ai regulations and llm regulations: past, present, and future. Exabeam Blog, 2024. URL: https://www.exabeam.com/explainers/ai-cyber-security/ai-regulations-and-llm-regulations-past-present-and-future/.

                    +
                    +
                    +[GRB+24]

                    Isabel O. Gallegos, Ryan A. Rossi, Joe Barrow, Md Mehrab Tanjim, Sungchul Kim, Franck Dernoncourt, Tong Yu, Ruiyi Zhang, and Nesreen K. Ahmed. Bias and fairness in large language models: a survey. 2024. URL: https://arxiv.org/abs/2309.00770, arXiv:2309.00770.

                    -
                    +
                    [HGP+22]

                    Thomas Hartvigsen, Saadia Gabriel, Hamid Palangi, Maarten Sap, Dipankar Ray, and Ece Kamar. ToxiGen: a large-scale machine-generated dataset for adversarial and implicit hate speech detection. In Smaranda Muresan, Preslav Nakov, and Aline Villavicencio, editors, Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 3309–3326. Dublin, Ireland, May 2022. Association for Computational Linguistics. URL: https://aclanthology.org/2022.acl-long.234, doi:10.18653/v1/2022.acl-long.234.

                    -
                    -[HYM+24] +
                    +[HSW+21] +

                    Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: low-rank adaptation of large language models. 2021. URL: https://arxiv.org/abs/2106.09685, arXiv:2106.09685.

                    +
                    +
                    +[HYM+24] +(1,2)

                    Lei Huang, Weijiang Yu, Weitao Ma, Weihong Zhong, Zhangyin Feng, Haotian Wang, Qianglong Chen, Weihua Peng, Xiaocheng Feng, Bing Qin, and Ting Liu. A survey on hallucination in large language models: principles, taxonomy, challenges, and open questions. ACM Transactions on Information Systems, November 2024. URL: http://dx.doi.org/10.1145/3703155, doi:10.1145/3703155.

                    -
                    +
                    [OAA+24]

                    OpenAI, Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, Red Avila, Igor Babuschkin, Suchir Balaji, Valerie Balcom, Paul Baltescu, Haiming Bao, Mohammad Bavarian, Jeff Belgum, Irwan Bello, Jake Berdine, Gabriel Bernadett-Shapiro, Christopher Berner, Lenny Bogdonoff, Oleg Boiko, Madelaine Boyd, Anna-Luisa Brakman, Greg Brockman, Tim Brooks, Miles Brundage, Kevin Button, Trevor Cai, Rosie Campbell, Andrew Cann, Brittany Carey, Chelsea Carlson, Rory Carmichael, Brooke Chan, Che Chang, Fotis Chantzis, Derek Chen, Sully Chen, Ruby Chen, Jason Chen, Mark Chen, Ben Chess, Chester Cho, Casey Chu, Hyung Won Chung, Dave Cummings, Jeremiah Currier, Yunxing Dai, Cory Decareaux, Thomas Degry, Noah Deutsch, Damien Deville, Arka Dhar, David Dohan, Steve Dowling, Sheila Dunning, Adrien Ecoffet, Atty Eleti, Tyna Eloundou, David Farhi, Liam Fedus, Niko Felix, Simón Posada Fishman, Juston Forte, Isabella Fulford, Leo Gao, Elie Georges, Christian Gibson, Vik Goel, Tarun Gogineni, Gabriel Goh, Rapha Gontijo-Lopes, Jonathan Gordon, Morgan Grafstein, Scott Gray, Ryan Greene, Joshua Gross, Shixiang Shane Gu, Yufei Guo, Chris Hallacy, Jesse Han, Jeff Harris, Yuchen He, Mike Heaton, Johannes Heidecke, Chris Hesse, Alan Hickey, Wade Hickey, Peter Hoeschele, Brandon Houghton, Kenny Hsu, Shengli Hu, Xin Hu, Joost Huizinga, Shantanu Jain, Shawn Jain, Joanne Jang, Angela Jiang, Roger Jiang, Haozhun Jin, Denny Jin, Shino Jomoto, Billie Jonn, Heewoo Jun, Tomer Kaftan, Łukasz Kaiser, Ali Kamali, Ingmar Kanitscheider, Nitish Shirish Keskar, Tabarak Khan, Logan Kilpatrick, Jong Wook Kim, Christina Kim, Yongjik Kim, Jan Hendrik Kirchner, Jamie Kiros, Matt Knight, Daniel Kokotajlo, Łukasz Kondraciuk, Andrew Kondrich, Aris Konstantinidis, Kyle Kosic, Gretchen Krueger, Vishal Kuo, Michael Lampe, Ikai Lan, Teddy Lee, Jan Leike, Jade Leung, Daniel Levy, Chak Ming Li, Rachel Lim, Molly Lin, Stephanie Lin, Mateusz Litwin, Theresa Lopez, Ryan Lowe, Patricia Lue, Anna Makanju, Kim Malfacini, Sam Manning, Todor Markov, Yaniv Markovski, Bianca Martin, Katie Mayer, Andrew Mayne, Bob McGrew, Scott Mayer McKinney, Christine McLeavey, Paul McMillan, Jake McNeil, David Medina, Aalok Mehta, Jacob Menick, Luke Metz, Andrey Mishchenko, Pamela Mishkin, Vinnie Monaco, Evan Morikawa, Daniel Mossing, Tong Mu, Mira Murati, Oleg Murk, David Mély, Ashvin Nair, Reiichiro Nakano, Rajeev Nayak, Arvind Neelakantan, Richard Ngo, Hyeonwoo Noh, Long Ouyang, Cullen O'Keefe, Jakub Pachocki, Alex Paino, Joe Palermo, Ashley Pantuliano, Giambattista Parascandolo, Joel Parish, Emy Parparita, Alex Passos, Mikhail Pavlov, Andrew Peng, Adam Perelman, Filipe de Avila Belbute Peres, Michael Petrov, Henrique Ponde de Oliveira Pinto, Michael, Pokorny, Michelle Pokrass, Vitchyr H. Pong, Tolly Powell, Alethea Power, Boris Power, Elizabeth Proehl, Raul Puri, Alec Radford, Jack Rae, Aditya Ramesh, Cameron Raymond, Francis Real, Kendra Rimbach, Carl Ross, Bob Rotsted, Henri Roussez, Nick Ryder, Mario Saltarelli, Ted Sanders, Shibani Santurkar, Girish Sastry, Heather Schmidt, David Schnurr, John Schulman, Daniel Selsam, Kyla Sheppard, Toki Sherbakov, Jessica Shieh, Sarah Shoker, Pranav Shyam, Szymon Sidor, Eric Sigler, Maddie Simens, Jordan Sitkin, Katarina Slama, Ian Sohl, Benjamin Sokolowsky, Yang Song, Natalie Staudacher, Felipe Petroski Such, Natalie Summers, Ilya Sutskever, Jie Tang, Nikolas Tezak, Madeleine B. Thompson, Phil Tillet, Amin Tootoonchian, Elizabeth Tseng, Preston Tuggle, Nick Turley, Jerry Tworek, Juan Felipe Cerón Uribe, Andrea Vallone, Arun Vijayvergiya, Chelsea Voss, Carroll Wainwright, Justin Jay Wang, Alvin Wang, Ben Wang, Jonathan Ward, Jason Wei, CJ Weinmann, Akila Welihinda, Peter Welinder, Jiayi Weng, Lilian Weng, Matt Wiethoff, Dave Willner, Clemens Winter, Samuel Wolrich, Hannah Wong, Lauren Workman, Sherwin Wu, Jeff Wu, Michael Wu, Kai Xiao, Tao Xu, Sarah Yoo, Kevin Yu, Qiming Yuan, Wojciech Zaremba, Rowan Zellers, Chong Zhang, Marvin Zhang, Shengjia Zhao, Tianhao Zheng, Juntang Zhuang, William Zhuk, and Barret Zoph. Gpt-4 technical report. 2024. URL: https://arxiv.org/abs/2303.08774, arXiv:2303.08774.

                    -
                    -[VSK+24] +
                    +[PHS+22] +(1,2) +

                    Ethan Perez, Saffron Huang, Francis Song, Trevor Cai, Roman Ring, John Aslanides, Amelia Glaese, Nat McAleese, and Geoffrey Irving. Red teaming language models with language models. 2022. URL: https://arxiv.org/abs/2202.03286, arXiv:2202.03286.

                    +
                    +
                    +[RSM+24] +

                    Rafael Rafailov, Archit Sharma, Eric Mitchell, Stefano Ermon, Christopher D. Manning, and Chelsea Finn. Direct preference optimization: your language model is secretly a reward model. 2024. URL: https://arxiv.org/abs/2305.18290, arXiv:2305.18290.

                    +
                    +
                    +[SZW+24] +

                    Oliver J. Sutton, Qinghua Zhou, Wei Wang, Desmond J. Higham, Alexander N. Gorban, Alexander Bastounis, and Ivan Y. Tyukin. Stealth edits to large language models. 2024. URL: https://arxiv.org/abs/2406.12670, arXiv:2406.12670.

                    +
                    +
                    +[VAA+24] +

                    Bertie Vidgen, Adarsh Agrawal, Ahmed M. Ahmed, Victor Akinwande, Namir Al-Nuaimi, Najla Alfaraj, Elie Alhajjar, Lora Aroyo, Trupti Bavalatti, Max Bartolo, Borhane Blili-Hamelin, Kurt Bollacker, Rishi Bomassani, Marisa Ferrara Boston, Siméon Campos, Kal Chakra, Canyu Chen, Cody Coleman, Zacharie Delpierre Coudert, Leon Derczynski, Debojyoti Dutta, Ian Eisenberg, James Ezick, Heather Frase, Brian Fuller, Ram Gandikota, Agasthya Gangavarapu, Ananya Gangavarapu, James Gealy, Rajat Ghosh, James Goel, Usman Gohar, Sujata Goswami, Scott A. Hale, Wiebke Hutiri, Joseph Marvin Imperial, Surgan Jandial, Nick Judd, Felix Juefei-Xu, Foutse Khomh, Bhavya Kailkhura, Hannah Rose Kirk, Kevin Klyman, Chris Knotz, Michael Kuchnik, Shachi H. Kumar, Srijan Kumar, Chris Lengerich, Bo Li, Zeyi Liao, Eileen Peters Long, Victor Lu, Sarah Luger, Yifan Mai, Priyanka Mary Mammen, Kelvin Manyeki, Sean McGregor, Virendra Mehta, Shafee Mohammed, Emanuel Moss, Lama Nachman, Dinesh Jinenhally Naganna, Amin Nikanjam, Besmira Nushi, Luis Oala, Iftach Orr, Alicia Parrish, Cigdem Patlak, William Pietri, Forough Poursabzi-Sangdeh, Eleonora Presani, Fabrizio Puletti, Paul Röttger, Saurav Sahay, Tim Santos, Nino Scherrer, Alice Schoenauer Sebag, Patrick Schramowski, Abolfazl Shahbazi, Vin Sharma, Xudong Shen, Vamsi Sistla, Leonard Tang, Davide Testuggine, Vithursan Thangarasa, Elizabeth Anne Watkins, Rebecca Weiss, Chris Welty, Tyler Wilbers, Adina Williams, Carole-Jean Wu, Poonam Yadav, Xianjun Yang, Yi Zeng, Wenhui Zhang, Fedor Zhdanov, Jiacheng Zhu, Percy Liang, Peter Mattson, and Joaquin Vanschoren. Introducing v0.5 of the ai safety benchmark from mlcommons. 2024. URL: https://arxiv.org/abs/2404.12241, arXiv:2404.12241.

                    +
                    +
                    +[VSK+24] +(1,2)

                    Bertie Vidgen, Nino Scherrer, Hannah Rose Kirk, Rebecca Qian, Anand Kannappan, Scott A. Hale, and Paul Röttger. Simplesafetytests: a test suite for identifying critical safety risks in large language models. 2024. URL: https://arxiv.org/abs/2311.08370, arXiv:2311.08370.

                    -
                    -[ZYY+24] +
                    +[WMR24] +

                    Sandra Wachter, Brent Mittelstadt, and Chris Russell. Do large language models have a legal duty to tell the truth? Royal Society Open Science, 11(8):240197, 2024. URL: https://royalsocietypublishing.org/doi/abs/10.1098/rsos.240197, arXiv:https://royalsocietypublishing.org/doi/pdf/10.1098/rsos.240197, doi:10.1098/rsos.240197.

                    +
                    +
                    +[ZYY+24]

                    Shuning Zhang, Lyumanshan Ye, Xin Yi, Jingyu Tang, Bo Shui, Haobin Xing, Pengfei Liu, and Hewu Li. "ghost of the past": identifying and resolving privacy leakage from llm's memory through proactive user interaction. 2024. URL: https://arxiv.org/abs/2410.14931, arXiv:2410.14931.

                    -
                    -[Zho24] +
                    +[Zho24]

                    Qinghua Zhou. Stealth edits: detecting stealth edits in llm outputs. Hugging Face Spaces, 2024. URL: https://huggingface.co/spaces/qinghua-zhou/stealth-edits.

                    +
                    +[Anthropic24] +

                    Anthropic. Anthropic's responsible scaling policy. Technical Report, Anthropic, 2024. URL: https://www-cdn.anthropic.com/1adf000c8f675958c2ee23805d91aaade1cd4613/responsible-scaling-policy.pdf.

                    +
                    + +
                    +[EuropeanMAgency24] +

                    European Medicines Agency. Guiding principles for the use of large language models in regulatory science and medicines regulatory activities. Guidance Document, European Medicines Agency, 2024. URL: https://www.ema.europa.eu/en/documents/other/guiding-principles-use-large-language-models-regulatory-science-medicines-regulatory-activities_en.pdf.

                    +
                    +
                    +[FinancialIRAuthority24] +

                    Financial Industry Regulatory Authority. Artificial intelligence, including large language models and generative ai. Regulatory Notice 24-09, FINRA, 2024. URL: https://www.finra.org/rules-guidance/notices/24-09.

                    +
                    +
                    +[LibraryoCongress23] +

                    Library of Congress. China: generative ai measures finalized. July 2023. URL: https://www.loc.gov/item/global-legal-monitor/2023-07-18/china-generative-ai-measures-finalized/.

                    +
                    +
                    +[NationalIoSaTechnology24] +

                    National Institute of Standards and Technology. Ai risk management framework. Technical Report, National Institute of Standards and Technology, 2024. URL: https://www.nist.gov/itl/ai-risk-management-framework.

                    +
                    +
                    +[OpenAI24] +

                    OpenAI. Openai preparedness framework. Technical Report, OpenAI, 2024. URL: https://cdn.openai.com/openai-preparedness-framework-beta.pdf.

                    +
                    +
                    +[UKGovernment24] +

                    UK Government. Ai regulation: a pro-innovation approach. White Paper, Department for Science, Innovation and Technology, 2024. URL: https://www.gov.uk/government/publications/ai-regulation-a-pro-innovation-approach/white-paper.

                    +
                    +
                    +[UNICEF24] +

                    UNICEF. Policy guidance on ai for children. Policy Report, UNICEF Office of Research - Innocenti, 2024. URL: https://www.unicef.org/innocenti/reports/policy-guidance-ai-children.

                    +
                    diff --git a/tamingllms/_build/html/notebooks/structured_output.html b/tamingllms/_build/html/notebooks/structured_output.html index a189f19..410afc2 100644 --- a/tamingllms/_build/html/notebooks/structured_output.html +++ b/tamingllms/_build/html/notebooks/structured_output.html @@ -214,7 +214,7 @@
                    -

                    3. Wrestling with Structured Output

                    +

                    3. Wrestling with Structured Output

                    In limits, there is freedom. Creativity thrives within structure.

                    —Julia B. Cameron

                    @@ -222,42 +222,42 @@

                    Contents

                    -

                    3.1. Introduction

                    +

                    3.1. Introduction

                    Large language models (LLMs) excel at generating human-like text, but they often struggle to produce output in a structured format consistently. This poses a significant challenge when we need LLMs to generate data that can be easily processed by other systems, such as databases, APIs, or other software applications. Sometimes, even with a well-crafted prompt, an LLM might produce an unstructured response when a structured one is expected. This can be particularly challenging when integrating LLMs into systems that require specific data formats.

                    As a motivating example, consider the following simple task: Given a segment of a SEC financial filing, generate a two-person discussion about the key financial data from the text in JSON format, simulating what would be a real-world discussion about the underlying companies’ disclosed financial information. We would like to generate a structured output that can be easily parsed and integrated with other systems.

                    Throughout this notebook, we will consider as input a segment of a sample SEC filing of Apple Inc.

                    @@ -363,7 +363,7 @@

                    -

                    3.2. Problem Statement

                    +

                    3.2. Problem Statement

                    Obtaining structured output from LLMs presents several significant challenges:

                    • Inconsistency: LLMs often produce unpredictable results, sometimes generating well-structured output and other times deviating from the expected format.

                    • @@ -372,7 +372,7 @@

                      -

                      3.3. User Needs

                      +

                      3.3. User Needs

                      What user needs drive the demand for LLM output constraints when building LLM-based applications? In a recent work by Google Research [Liu et al., 2024], the authors explore the user need for constraints on the output of large language models, drawing on a survey of 51 industry professionals who use LLMs in their work. These needs can be broadly categorized as follows:

                      1. Improving Developer Efficiency and Workflow

                        @@ -395,10 +395,10 @@

                        -

                        3.4. Solutions

                        +

                        3.4. Solutions

                        Several strategies and tools can be employed to address the challenges of structured output from LLMs.

                        -

                        3.4.1. Strategies

                        +

                        3.4.1. Strategies

                        • Schema Guidance: Providing the LLM with a clear schema or blueprint of the desired output structure helps to constrain its generation and improve consistency. This can be achieved by using tools like Pydantic to define the expected data structure and then using that definition to guide the LLM’s output.

                        • Output Parsing: When LLMs don’t natively support structured output, parsing their text output using techniques like regular expressions or dedicated parsing libraries can extract the desired information. For example, you can use regular expressions to extract specific patterns from the LLM’s output, or you can use libraries like Pydantic to parse the output into structured data objects.

                        • @@ -406,9 +406,9 @@

                          -

                          3.4.2. Techniques and Tools

                          +

                          3.4.2. Techniques and Tools

                          -

                          3.4.2.1. One-Shot Prompts

                          +

                          3.4.2.1. One-Shot Prompts

                          In one-shot prompting, you provide a single example of the desired output format within the prompt.

                          @@ -475,7 +475,7 @@

                          -

                          3.4.2.2. Structured Output with Provider-Specific APIs

                          +

                          3.4.2.2. Structured Output with Provider-Specific APIs

                          One-shot prompting is a simple technique that can lead to material improvements in structured output, though may not be sufficient for complex (e.g. nested) structures and / or when the model’s output needs to be restricted to a specific set of options or types.

                          Provider-specific APIs can offer ways to handle those challenges. We will explore two approaches here using OpenAI’s API:

                            @@ -484,7 +484,7 @@

                            -

                            3.4.2.3. JSON Mode

                            +

                            3.4.2.3. JSON Mode

                            JSON mode is a feature provided by most LLM API providers, such as OpenAI, that allows the model to generate output in JSON format. This is particularly useful when you need structured data as a result, such as when parsing the output programmatically or integrating it with other systems that require JSON input. As depicted in Fig. 3.1, JSON mode is implemented by instructing theLLM model to use JSON as response format and optionally defining a target schema.

                            JSON Mode @@ -622,7 +622,7 @@

                            -

                            3.4.3. LangChain

                            +

                            3.4.3. LangChain

                            LangChain is a framework designed to simplify the development of LLM applications. It provider an abstraction layer over many LLM providers, including OpenAI, that offers several tools for parsing structured output.

                            In particular, LangChain offers the with_structured_output method, which can be used with LLMs that support structured output APIs, allowing you to enforce a schema directly within the prompt.

                            @@ -682,7 +682,7 @@

                            .with_structured_output() can be found here.

                          -

                          3.4.4. Outlines

                          +

                          3.4.4. Outlines

                          Outlines [Outlines, 2024] is a library specifically focused on structured text generation from LLMs. Under the hood, Outlines works by adjusting the probability distribution of the model’s output logits - the raw scores from the final layer of the neural network that are normally converted into text tokens. By introducing carefully crafted logit biases, Outlines can guide the model to prefer certain tokens over others, effectively constraining its outputs to a predefined set of valid options.

                          The authors solve the general guided generation problem [Willard and Louf, 2023], which as a consequence solves the problem of structured output generation, in LLMs by introducing an efficient indexing approach that reformulates neural text generation using finite-state machines (FSMs).

                          They define the next token generation as a random variable:

                          @@ -825,7 +825,7 @@

                          -

                          3.4.5. Ollama

                          +

                          3.4.5. Ollama

                          Ollama is a popular tool that allows you to run large language models (LLMs) locally. It has recently added support for structured output generation. The current ollama implementation leverages llama.cpp GBNF (GGML BNF) grammars [Ggerganov, 2024] to enable structured output generation.

                          llama.cpp GBNF forces language models to generate output in specific, predefined formats by constraining their outputs to follow precise rules and patterns. The system accomplishes this through a formal grammar specification that defines exactly how valid outputs can be constructed. It’s essentially an extension of BNF (Backus-Naur Form) [Wikipedia contributors, 2024] with some modern regex-like features added. These rules carefully define what elements are allowed, how they can be combined, and what patterns of repetition and sequencing are valid. By enforcing these constraints during generation, GBNF ensures the model’s output strictly adheres to the desired format.

                          Ollama first introduced structured output generation in version 0.5.1 providing support for JSON output but highlighting additional formats are coming soon.

                          @@ -923,9 +923,9 @@

                          -

                          3.5. Discussion

                          +

                          3.5. Discussion

                          -

                          3.5.1. Comparing Solutions

                          +

                          3.5.1. Comparing Solutions

                          The choice of framework for structured LLM output depends heavily on specific constraints, requirements and use cases. LangChain is the most used LLM framework today with a large developer community base however its structured output support depends on the underlying LLM provider support. Ollama enables straightforward local deployment and experimentation democratizing access to LLMs while fostering privacy and control, however today it only offers JSON format with further formats to come. Outlines emerges as a solution with great flexibility and control over output structure while providing support for a wide range of LLMs. Table 3.1 provides a summary comparison of the different frameworks.

                @@ -971,7 +971,7 @@

                -

                3.5.2. Best Practices

                +

                3.5.2. Best Practices

                • Clear Schema Definition: Define the desired output structure clearly. This can be done in several ways including schemas, types, or Pydantic models as appropriate. This ensures the LLM knows exactly what format is expected.

                • Descriptive Naming: Use meaningful names for fields and elements in your schema. This makes the output more understandable and easier to work with.

                • @@ -980,7 +980,7 @@

                  -

                  3.5.3. Research and Ongoing Debate

                  +

                  3.5.3. Research and Ongoing Debate

                  The use of structured output for Large Language Models (LLMs) is a developing area. While the ability to constrain LLM outputs offer clear benefits in parsing, robustness, and integration, there is growing debate on whether it also potentially comes at the cost of performance as well as reasoning abilities. Research in this area should be taken with a grain of salt since findings are mixed and often depend on the specific task and model family at hand furthermore model families are not always comparable and are getting updated by the day! Nonetheless, early findings provide some interesting insights as to why there is no one-size-fits-all solution when it comes to LLMs structured output.

                  There is some evidence indicating that LLMs may have bias in their handling of different output formats [Long et al., 2024]. The study examined common output structures like multiple-choice answers, wrapped text, lists, and key-value mappings. The authors analyzed key LLM model families, namely Gemma, Mistral, and ChatGPT, uncovering bias across multiple tasks and formats. The researchers attributed these biases to the models’ underlying token distributions for different formats. An example of this format bias emerged in the comparison between JSON and YAML outputs. While models like Mistral and Gemma excelled at generating JSON structures, they performed notably worse with YAML. Their YAML outputs often contained extraneous information that degrades output quality. This disparity likely stems from JSON’s prevalence in training data, highlighting how a format’s popularity directly influences model performance. While the studied models can be probably considered outdated by now since models are getting updated on a rapidly fashion, it is important to remark that addressing format bias is critical for advancing LLMs and ensuring their reliable application in real-world scenarios.

                  Recent research “Let Me Speak Freely? A Study on the Impact of Format Restrictions on Performance of Large Language Models” [Tam et al., 2024] suggests that imposing format restrictions on LLMs might impact their performance, particularly in reasoning-intensive tasks. Further evidence [Aider, 2024] suggests LLMs may produce lower quality code if they’re asked to return it as part of a structured JSON response, in particular:

                  @@ -1010,15 +1010,15 @@

                  -

                  3.6. Conclusion

                  +

                  3.6. Conclusion

                  Extracting structured output from LLMs is crucial for integrating them into real-world applications. By understanding the challenges and employing appropriate strategies and tools, developers can improve the reliability and usability of LLM-powered systems, unlocking their potential to automate complex tasks and generate valuable insights.

                  -

                  3.7. Acknowledgements

                  +

                  3.7. Acknowledgements

                  We would like to thank Cameron Pfiffer from the .txt team for his insightful review and feedback.

                  -

                  3.8. References

                  +

                  3.8. References

                  [Aid24] diff --git a/tamingllms/_build/html/objects.inv b/tamingllms/_build/html/objects.inv index cae5d59..0c8f256 100644 Binary files a/tamingllms/_build/html/objects.inv and b/tamingllms/_build/html/objects.inv differ diff --git a/tamingllms/_build/html/searchindex.js b/tamingllms/_build/html/searchindex.js index 99b1954..59c0f5a 100644 --- a/tamingllms/_build/html/searchindex.js +++ b/tamingllms/_build/html/searchindex.js @@ -1 +1 @@ -Search.setIndex({"docnames": ["markdown/intro", "markdown/toc", "notebooks/alignment", "notebooks/evals", "notebooks/output_size_limit", "notebooks/safety", "notebooks/structured_output"], "filenames": ["markdown/intro.md", "markdown/toc.md", "notebooks/alignment.ipynb", "notebooks/evals.ipynb", "notebooks/output_size_limit.ipynb", "notebooks/safety.ipynb", "notebooks/structured_output.ipynb"], "titles": ["1. Introduction", "Taming LLMs", "6. Preference-Based Alignment", "4. The Evals Gap", "2. Output Size Limitations", "5. Safety", "3. Wrestling with Structured Output"], "terms": {"am": 0, "alwai": [0, 2, 3, 6], "do": [0, 2, 3, 4, 6], "which": [0, 2, 3, 4, 5, 6], "cannot": [0, 2, 3], "order": [0, 2, 3, 6], "mai": [0, 2, 3, 4, 5, 6], "learn": [0, 2, 3], "how": [0, 2, 3, 4, 5, 6], "pablo": [0, 3], "picasso": 0, "In": [0, 2, 3, 4, 5, 6], "recent": [0, 2, 3, 5, 6], "year": [0, 1, 2, 3, 4, 6], "larg": [0, 1, 2, 3, 4, 5, 6], "languag": [0, 1, 3, 4, 5, 6], "model": [0, 1, 5, 6], "llm": [0, 2, 4, 6], "have": [0, 2, 3, 4, 5, 6], "emerg": [0, 1, 2, 5, 6], "transform": [0, 2, 3, 6], "forc": [0, 3, 6], "technologi": [0, 3, 4, 6], "promis": [0, 2, 3], "revolution": 0, "build": [0, 1, 2, 3, 4, 6], "product": [0, 1, 2, 3, 6], "interact": [0, 2, 3, 4, 5, 6], "comput": [0, 2, 3, 4, 5, 6], "from": [0, 3, 4, 5, 6], "chatgpt": [0, 2, 6], "github": [0, 1, 2, 3, 6], "copilot": 0, "claud": [0, 2, 3, 4], "artifact": 0, "system": [0, 2, 3, 4, 5, 6], "captur": [0, 2, 3], "public": [0, 2, 3, 5], "imagin": 0, "spark": 0, "gold": [0, 2, 3], "rush": 0, "ai": [0, 2, 3, 6], "power": [0, 1, 2, 3, 4, 5, 6], "applic": [0, 1, 2, 4, 5, 6], "howev": [0, 2, 3, 4, 5, 6], "beneath": 0, "surfac": [0, 3], "technolog": [0, 3, 5], "revolut": 0, "li": [0, 2, 3, 5], "complex": [0, 2, 3, 4, 5, 6], "landscap": [0, 2, 3], "practition": [0, 3], "must": [0, 2, 3, 4], "navig": [0, 1, 3], "focus": [0, 2, 3, 4, 5, 6], "bring": [0, 2], "awar": [0, 3, 4], "limit": [0, 2, 3, 5, 6], "har": [0, 1, 3, 4], "solut": [0, 1, 3, 4], "overcom": [0, 3, 4], "them": [0, 2, 3, 4, 5, 6], "robust": [0, 2, 3, 4, 5, 6], "It": [0, 2, 3, 4, 6], "offer": [0, 2, 3, 4, 5, 6], "critic": [0, 1, 2, 3, 4, 5, 6], "implement": [0, 1, 2, 3, 4, 6], "back": [0, 3, 6], "reproduc": [0, 1, 3], "exampl": [0, 1, 2, 3, 5, 6], "while": [0, 1, 2, 3, 4, 5, 6], "mani": [0, 2, 3, 4, 6], "resourc": [0, 2, 3, 4, 5], "cover": [0, 2, 3, 4], "capabl": [0, 1, 3, 4, 5, 6], "specif": [0, 1, 2, 3, 4], "hidden": 0, "pitfal": [0, 2], "engin": [0, 1, 2, 3, 6], "technic": [0, 1, 2, 3, 4, 5, 6], "manag": [0, 1, 3, 4, 5, 6], "face": [0, 2, 3, 5], "when": [0, 1, 2, 3, 4, 5, 6], "comprehens": [0, 1, 2, 3, 4, 6], "guid": [0, 2, 3, 6], "leverag": [0, 2, 3, 4, 6], "battl": [0, 1], "test": [0, 1, 2, 5, 6], "tool": [0, 2, 4, 5], "throughout": [0, 3, 4, 6], "tackl": [0, 2, 3], "follow": [0, 2, 3, 4, 5, 6], "non": [0, 1, 2, 6], "exhaust": 0, "list": [0, 2, 3, 4, 6], "structur": [0, 2, 3, 4], "un": 0, "reliabl": [0, 2, 3, 6], "struggl": [0, 3, 6], "maintain": [0, 2, 3, 4, 5, 6], "consist": [0, 2, 3, 4, 6], "output": [0, 2, 3, 5], "format": [0, 2, 3, 4, 6], "complic": 0, "integr": [0, 2, 3, 6], "larger": [0, 2, 3, 4, 6], "make": [0, 2, 3, 4, 5, 6], "error": [0, 2, 3, 6], "handl": [0, 1, 2, 3, 4, 5, 6], "more": [0, 2, 3, 4, 5, 6], "size": [0, 2, 3, 6], "length": [0, 2, 3, 6], "constraint": [0, 1, 2, 3, 4, 6], "strict": [0, 6], "token": [0, 1, 2, 3, 6], "both": [0, 2, 3, 5], "input": [0, 2, 3, 4, 5, 6], "requir": [0, 2, 4, 6], "care": [0, 2, 3, 5, 6], "chunk": [0, 1, 2], "strategi": [0, 1, 2, 3, 4], "long": [0, 1, 2, 3, 5, 6], "form": [0, 1, 2, 3, 6], "effect": [0, 2, 3, 4, 5, 6], "tradit": [0, 2], "softwar": [0, 6], "methodologi": [0, 2, 3, 6], "break": [0, 2, 3, 4], "down": [0, 3, 4], "deal": [0, 2], "determinist": [0, 1, 6], "gener": [0, 1, 6], "new": [0, 1, 2, 3, 4, 5, 6], "hallucin": [0, 2, 3, 5, 6], "These": [0, 2, 3, 4, 5, 6], "can": [0, 2, 3, 4, 5, 6], "plausibl": 0, "sound": 0, "entir": [0, 3, 4, 6], "fabric": [0, 3, 5], "inform": [0, 2, 3, 4, 5, 6], "creat": [0, 2, 3, 4, 5, 6], "signific": [0, 2, 3, 4, 5, 6], "risk": [0, 2, 3, 4], "safeti": [0, 2, 3, 6], "align": [0, 3, 4, 6], "harm": [0, 2, 3], "bias": [0, 2, 3, 5, 6], "inappropri": [0, 2], "safeguard": [0, 3, 5], "monitor": [0, 1, 2, 3], "ensur": [0, 2, 3, 4, 6], "safe": [0, 2, 3, 6], "deploy": [0, 1, 2, 3, 6], "cost": [0, 2, 3, 6], "optim": [0, 1, 3, 4], "The": [0, 2, 4, 5, 6], "financi": [0, 2, 3, 4, 6], "oper": [0, 2, 3, 4, 5, 6], "base": [0, 6], "quickli": [0, 2, 4], "becom": [0, 3, 5, 6], "prohibit": [0, 2, 3], "without": [0, 2, 3, 4, 5, 6], "observ": [0, 2, 3, 6], "vendor": [0, 1, 3], "lock": [0, 1], "cloud": [0, 2, 3, 6], "provid": [0, 2, 3, 4, 5], "depend": [0, 2, 3, 6], "through": [0, 1, 2, 3, 4, 5, 6], "proprietari": [0, 2, 6], "infrastructur": 0, "difficult": [0, 2, 3, 5], "switch": 0, "self": [0, 1, 2, 3], "host": [0, 1, 3], "take": [0, 1, 2, 3, 4, 6], "hand": [0, 4, 6], "focu": [0, 1, 2, 3, 4, 5, 6], "access": [0, 2, 3, 4, 5, 6], "all": [0, 2, 3, 4, 5, 6], "ar": [0, 1, 2, 3, 5, 6], "fulli": [0, 2, 3, 4], "document": [0, 3, 4, 6], "allow": [0, 3, 4, 6], "reader": [0, 1], "replic": [0, 3, 6], "result": [0, 2, 3, 4, 5, 6], "exactli": [0, 3, 6], "design": [0, 1, 2, 4, 5, 6], "run": [0, 2, 3, 6], "consum": [0, 2, 3, 6], "grade": [0, 2, 3], "hardwar": [0, 2, 3], "expens": [0, 2, 3], "avail": [0, 2, 3, 4, 5, 6], "notebook": [0, 2, 6], "modifi": [0, 3], "extend": [0, 2, 3, 6], "built": [0, 3, 6], "us": [0, 2, 4, 5, 6], "free": [0, 2, 3, 5], "everyon": [0, 3], "minim": [0, 2, 3, 5, 6], "framework": [0, 2, 3, 5], "wai": [0, 2, 3, 4, 5, 6], "priorit": [0, 2, 3, 5], "transpar": [0, 2, 3, 5, 6], "visibl": [0, 3], "being": [0, 2, 3], "better": [0, 1, 2, 3, 4], "understand": [0, 1, 2, 3, 4, 5, 6], "custom": [0, 2, 3, 5], "flexibl": [0, 3, 4, 6], "adapt": [0, 2, 3, 4, 5], "case": [0, 1, 3, 4, 5, 6], "unlik": [0, 2, 3], "black": [0, 2], "box": 0, "commerci": [0, 2, 3, 6], "most": [0, 2, 3, 4, 6], "freeli": [0, 6], "foster": [0, 2, 3, 6], "reduc": [0, 2, 3, 4, 6], "independ": [0, 3, 6], "freedom": [0, 6], "architectur": [0, 2, 3, 4, 5, 6], "decis": [0, 2, 3, 6], "keep": [0, 2, 3, 4], "principl": [0, 2, 3, 5], "itself": [0, 2, 3], "live": [0, 3, 5], "evolv": [0, 2, 3, 4], "chang": [0, 2, 3, 5], "encourag": [0, 2, 3, 6], "report": [0, 1, 2, 3, 5, 6], "suggest": [0, 2, 3, 6], "improv": [0, 2, 3, 4, 5, 6], "contribut": [0, 3, 4, 5], "via": [0, 2, 3, 6], "pull": 0, "request": [0, 2, 3, 4, 6], "share": [0, 2, 3, 5, 6], "own": [0, 2, 3, 4], "experi": [0, 2, 3, 4, 6], "commun": [0, 1, 2, 3, 6], "propos": [0, 3], "chapter": [0, 2, 3, 5], "section": [0, 2, 3, 4, 5, 6], "found": [0, 3, 6], "http": [0, 1, 2, 3, 4, 5, 6], "com": [0, 1, 2, 3, 4, 6], "souzatharsi": [0, 1, 2], "tamingllm": [0, 1, 2], "whether": [0, 2, 3, 4, 6], "you": [0, 2, 3, 4, 6], "ve": 0, "typo": [0, 5], "want": [0, 2, 4, 6], "welcom": 0, "look": [0, 1, 2, 3], "our": [0, 2, 3, 4, 5, 6], "goal": [0, 2, 3, 4, 5], "discourag": 0, "enabl": [0, 2, 3, 4, 5, 6], "By": [0, 1, 2, 3, 4, 6], "upfront": [0, 1], "equip": [0, 1, 3], "avoid": [0, 2, 3, 6], "current": [0, 1, 2, 3, 4, 6], "discours": [0, 1, 5], "around": [0, 1, 2, 3, 4, 5, 6], "tend": [0, 1, 3], "toward": [0, 2, 3, 6], "extrem": [0, 2, 3, 5], "either": [0, 2, 3, 4], "uncrit": 0, "enthusiasm": 0, "wholesal": [0, 3], "dismiss": 0, "differ": [0, 2, 3, 4, 6], "rather": [0, 2, 3], "than": [0, 2, 3], "theoret": 0, "examin": [0, 2, 3, 4, 6], "first": [0, 2, 3, 4, 6], "everi": [0, 3], "concept": [0, 2, 3, 5], "illustr": [0, 2, 3, 4, 5, 6], "execut": [0, 3], "immedi": [0, 2, 3], "analysi": [0, 1, 2, 3, 4], "balanc": [0, 2, 3, 4, 6], "help": [0, 2, 3, 4, 6], "intend": [0, 3], "develop": [0, 2, 3, 4, 5, 6], "step": [0, 1, 2, 3, 6], "insight": [0, 2, 3, 4, 6], "along": [0, 2, 3], "guidanc": [0, 2, 6], "could": [0, 2, 3, 4, 5, 6], "derail": 0, "project": [0, 2, 3], "earli": [0, 2, 3, 6], "befor": [0, 2, 3, 5, 6], "thei": [0, 2, 3, 4, 5, 6], "costli": [0, 3], "problem": [0, 1, 2], "too": [0, 2, 3, 4, 5], "late": [0, 2], "lifecycl": 0, "lead": [0, 2, 3, 4, 5, 6], "genai": [0, 2], "initi": [0, 2, 3, 4, 6], "leader": [0, 3], "advoc": 0, "anyon": [0, 5], "seek": [0, 3], "work": [0, 1, 2, 3, 4, 5, 6], "typic": [0, 2, 3, 4, 6], "job": [0, 3, 5], "role": [0, 2, 3, 4, 5, 6], "platform": [0, 3, 4, 6], "backend": [0, 2, 3], "exist": [0, 2, 3], "ml": 0, "transit": [0, 3, 4, 6], "overse": 0, "motiv": [0, 3, 6], "need": [0, 2, 3, 4, 5], "readi": [0, 3], "desir": [0, 2, 3, 6], "perform": [0, 1, 2, 3, 4, 5, 6], "after": [0, 2, 3, 4, 6], "read": [0, 2, 3, 4, 6], "implic": [0, 1, 2, 3], "recommend": [0, 2, 3, 4, 6], "abl": [0, 2, 3, 4, 6], "deploi": [0, 2, 3, 4, 5], "proper": [0, 2, 5, 6], "realist": [0, 2, 5], "effort": [0, 3, 6], "estim": [0, 3], "impact": [0, 2, 3, 4, 6], "timelin": 0, "To": [0, 2, 3, 4, 5, 6], "should": [0, 2, 3, 4, 6], "basic": [0, 2, 3, 4], "program": [0, 3], "knowledg": [0, 2, 3], "introductori": [0, 1], "langchain": [0, 1, 3, 4], "e": [0, 2, 3, 4, 5, 6], "g": [0, 2, 3, 4, 5, 6], "chat": [0, 2, 3, 4, 6], "prompt": [0, 1, 3, 5], "templat": [0, 1, 3], "openai": [0, 2, 3, 5, 6], "anthrop": [0, 6], "similar": [0, 2, 3, 6], "dive": 0, "here": [0, 1, 2, 3, 4, 6], "get": [0, 2, 3, 4, 6], "start": [0, 2, 3, 5, 6], "activ": [0, 2, 3, 5], "virtual": [0, 3], "m": [0, 2, 3, 6], "venv": 0, "tame": [0, 2], "env": [0, 2, 3, 4, 6], "bin": 0, "On": [0, 1, 3, 6], "window": [0, 1, 3], "script": 0, "instal": [0, 2, 3, 6], "packag": [0, 3, 6], "pip": [0, 2, 3, 6], "r": [0, 2, 3, 4, 6], "txt": [0, 3, 4, 6], "file": [0, 2, 3, 4, 6], "root": [0, 2], "directori": [0, 3], "add": [0, 2, 3, 4], "other": [0, 2, 3, 4, 5, 6], "sensit": [0, 2, 3, 5], "openai_api_kei": [0, 2], "your_openai_api_key_her": 0, "never": [0, 6], "commit": [0, 2, 3], "version": [0, 2, 3, 6], "control": [0, 2, 3, 5, 6], "contain": [0, 2, 3, 4, 6], "kept": [0, 3], "privat": [0, 3], "clone": [0, 2], "companion": 0, "git": 0, "cd": 0, "If": [0, 2, 3, 6], "encount": [0, 1, 3], "rate": [0, 2, 3], "consid": [0, 2, 3, 4, 5, 6], "smaller": [0, 2, 3, 4, 6], "retri": [0, 6], "logic": [0, 2, 3, 4], "conflict": [0, 3], "try": [0, 2, 3, 6], "fresh": 0, "like": [0, 2, 3, 4, 5, 6], "poetri": 0, "check": [0, 3, 6], "page": [0, 3], "known": [0, 3, 5, 6], "now": [0, 2, 3, 4, 5, 6], "let": [0, 2, 3, 4, 6], "begin": [0, 2, 3, 6], "explor": [0, 2, 3, 5, 6], "dr": 0, "tharsi": [0, 1, 2], "souza": [0, 1, 2], "scientist": [0, 5], "special": [0, 3, 6], "he": [0, 2, 3, 5], "lectur": 0, "columbia": 0, "univers": [0, 3], "master": [0, 6], "scienc": [0, 2, 3, 5], "appli": [0, 2, 3, 4, 6], "analyt": [0, 5], "incom": [0, 3], "head": [0, 2, 3, 4], "equiti": [0, 3], "citadel": 0, "former": [0, 3], "senior": [0, 3], "vp": 0, "two": [0, 2, 3, 4, 6], "sigma": [0, 2], "invest": [0, 2, 3, 6], "also": [0, 2, 3, 4, 5, 6], "enjoi": 0, "mentor": 0, "under": [0, 2, 3, 6], "repres": [0, 2, 3, 6], "student": [0, 2], "profession": [0, 2, 3, 6], "divers": [0, 2, 3, 4, 5, 6], "global": [0, 3], "ecosystem": [0, 3], "With": [0, 3], "over": [0, 1, 2, 3, 4, 5, 6], "15": [0, 3, 6], "deliv": [0, 3], "across": [0, 2, 3, 6], "startup": 0, "fortun": 0, "500": [0, 2, 3], "compani": [0, 2, 3, 4, 5, 6], "numer": [0, 3], "scholarli": 0, "frequent": [0, 3, 6], "speaker": [0, 3], "academ": [0, 2, 3], "busi": [0, 3], "confer": [0, 6], "ground": [0, 1, 2, 3], "background": [0, 3, 4], "draw": [0, 3, 5, 6], "scale": [0, 2, 3, 5, 6], "stage": [0, 6], "major": [0, 2, 3, 6], "institut": [0, 3], "well": [0, 2, 3, 5, 6], "advis": [0, 2], "profit": [0, 3, 4, 6], "organ": [0, 2, 3, 4], "uniqu": [0, 2, 3, 5], "bridg": 0, "gap": [0, 2], "between": [0, 2, 3, 4, 6], "potenti": [0, 2, 3, 4, 5, 6], "next": [0, 2, 3, 6], "hold": [0, 2, 3], "ph": 0, "d": [0, 2, 3, 6], "ucl": 0, "london": 0, "phil": [0, 5], "sc": 0, "b": [0, 3, 5, 6], "sign": [1, 3], "up": [1, 2, 3, 4, 6], "receiv": [1, 2, 3, 4, 6], "updat": [1, 2, 3, 4, 6], "abstract": [1, 3, 6], "heavili": [1, 3, 6], "gloss": 1, "fundament": [1, 3, 6], "challeng": [1, 2, 3, 4, 5, 6], "convers": [1, 2, 3, 4, 5, 6], "thi": [1, 2, 3, 4, 5, 6], "book": [1, 3], "kei": [1, 2, 5, 6], "python": [1, 3, 4, 6], "proven": 1, "an": [1, 2, 3, 4, 5, 6], "yet": [1, 2, 3, 4, 5], "i": [1, 2, 3, 4, 5, 6], "concret": [1, 5], "unstructur": [1, 6], "context": [1, 2, 3, 4, 5, 6], "code": [1, 2, 3, 5, 6], "sidestep": 1, "inher": [1, 2, 3, 6], "core": [1, 3], "we": [1, 2, 3, 4, 5, 6], "ll": [1, 2, 3], "address": [1, 2, 3, 4, 5, 6], "approach": [1, 2, 3, 4, 5, 6], "note": [1, 3, 4, 6], "perspect": 1, "who": [1, 2, 3, 4, 6], "For": [1, 2, 3, 4, 5, 6], "outcom": [1, 2, 3, 5, 6], "prerequisit": 1, "set": [1, 2, 3, 4, 6], "your": [1, 2, 3, 4, 6], "environ": [1, 2, 3, 4, 6], "setup": [1, 3, 6], "api": [1, 2, 3], "configur": [1, 2, 3], "repositori": [1, 2, 3], "troubleshoot": 1, "common": [1, 2, 3, 4, 6], "issu": [1, 2, 3, 4, 5, 6], "about": [1, 2, 3, 4, 5, 6], "author": [1, 2, 3, 5, 6], "": [1, 2, 3, 4, 5, 6], "statement": [1, 5], "techniqu": [1, 2, 3, 4, 5], "One": [1, 2, 3, 5], "shot": [1, 3], "json": [1, 2, 3, 4], "mode": 1, "outlin": [1, 3], "multipl": [1, 2, 3, 4, 6], "choic": [1, 2, 3, 6], "pydant": [1, 2, 3, 6], "discuss": [1, 3, 5], "compar": [1, 2, 3, 4], "best": [1, 2, 3], "research": [1, 2, 3, 4, 5], "ongo": [1, 3], "debat": 1, "conclus": 1, "acknowledg": [1, 3], "refer": 1, "pattern": [1, 2, 3, 6], "content": 1, "what": [1, 2, 3, 6], "contextu": [1, 3], "link": [1, 3], "write": [1, 2, 3, 6], "construct": [1, 2, 3, 6], "dynam": [1, 2, 3], "paramet": [1, 2, 3, 5, 6], "usag": [1, 2, 3, 6], "futur": [1, 2, 3, 5], "consider": [1, 2, 5, 6], "machin": [1, 2, 5, 6], "temperatur": [1, 2, 3, 4, 6], "sampl": [1, 2, 4, 6], "spectrum": [1, 3], "properti": 1, "conceptu": [1, 6], "overview": [1, 5, 6], "compon": [1, 2, 3], "metric": [1, 2], "evalu": [1, 4, 6], "human": [1, 3, 4, 5, 6], "benchmark": [1, 2], "leaderboard": 1, "type": [1, 2, 3, 4, 6], "detect": [1, 3, 5, 6], "retriev": [1, 3], "augment": [1, 3], "rag": 1, "select": [1, 2, 3], "index": [1, 2, 3, 4, 6], "vector": 1, "store": [1, 2, 3, 4], "method": [1, 2, 3, 4, 5, 6], "pipelin": [1, 2, 3, 6], "valid": [1, 2, 3, 6], "raw": [1, 3, 6], "misalign": 1, "supervis": [1, 3], "fine": [1, 3, 6], "tune": [1, 3, 5, 6], "sft": 1, "studi": [1, 6], "polici": [1, 3], "cach": [1, 3], "invalid": [1, 6], "predict": [1, 2, 3, 6], "llama": [1, 2, 3, 5, 6], "llamafil": 1, "ollama": 1, "migrat": 1, "misc": [1, 2], "tharsistpsouza2024tamingllm": [1, 2], "t": [1, 2, 3, 4, 5, 6], "p": [1, 2, 3, 6], "titl": [1, 2, 3], "2024": [1, 2, 3, 4, 5, 6], "journal": [1, 2, 3, 6], "url": [1, 2, 3, 5, 6], "peopl": [2, 3], "valu": [2, 3, 4, 6], "its": [2, 3, 4, 5, 6], "privileg": 2, "abov": [2, 3], "soon": [2, 6], "lose": [2, 3], "dwight": 2, "eisenhow": 2, "releas": [2, 3, 5, 6], "3": [2, 3, 5, 6], "5": [2, 3, 4, 5, 6], "2022": [2, 3, 5], "mark": [2, 3, 5], "pivot": [2, 3], "moment": 2, "histori": [2, 3], "artifici": [2, 3], "intellig": [2, 3], "within": [2, 3, 4, 5, 6], "just": [2, 3, 4, 6], "five": [2, 3], "dai": [2, 3, 5, 6], "launch": [2, 3], "attract": [2, 3], "million": [2, 3], "month": [2, 3], "becam": 2, "fastest": [2, 3], "grow": [2, 3, 6], "100": [2, 3, 6], "monthli": [2, 3], "rais": [2, 3, 4, 5], "intrigu": 2, "question": [2, 3, 5, 6], "why": [2, 3, 6], "did": [2, 3, 6], "dramat": [2, 3, 6], "predecessor": 2, "gpt": [2, 3, 4, 5, 6], "had": [2, 3], "same": [2, 3, 4, 6], "number": [2, 3, 4, 6], "far": [2, 4], "less": [2, 3], "attent": 2, "arguabl": 2, "answer": [2, 3, 4, 6], "feedback": [2, 3, 6], "abil": [2, 3, 5, 6], "least": [2, 3], "ey": 2, "breakthrough": [2, 5], "demonstr": [2, 3, 4, 5, 6], "crucial": [2, 6], "greater": [2, 3], "process": [2, 3, 4, 5, 6], "modern": [2, 3, 4, 6], "direct": [2, 3, 5], "rafailov": 2, "et": [2, 3, 5, 6], "al": [2, 3, 5, 6], "present": [2, 3, 4, 5, 6], "practic": [2, 3, 4], "where": [2, 3, 4, 5, 6], "autom": [2, 3, 5, 6], "fashion": [2, 6], "open": [2, 3, 4, 5, 6], "sourc": [2, 3, 5, 6], "pre": [2, 3], "train": [2, 3, 5, 6], "default": [2, 3, 6], "becaus": [2, 3], "state": [2, 3, 4, 6], "art": [2, 3], "object": [2, 3, 6], "given": [2, 3, 4, 6], "webpag": 2, "internet": [2, 3], "veri": [2, 3], "ask": [2, 3, 6], "instruct": [2, 3, 4, 5, 6], "sai": [2, 6], "ouyang": [2, 5], "2": [2, 3, 5, 6], "explain": 2, "moon": 2, "land": [2, 3], "6": [2, 3, 4, 6], "old": [2, 3], "import": [2, 3, 4, 5, 6], "pipe": 2, "text": [2, 3, 4, 6], "gpt2": [2, 3], "msg": 2, "short": [2, 3, 4, 6], "sentenc": [2, 3, 4, 6], "_": [2, 3, 6], "rang": [2, 3, 4, 5, 6], "len": [2, 3, 4], "print": [2, 3, 4, 6], "f": [2, 3, 4, 6], "n": [2, 3, 4, 6], "1": [2, 3, 5, 6], "0": [2, 3, 4, 6], "generated_text": 2, "good": [2, 3, 5, 6], "idea": 2, "one": [2, 3, 4, 5, 6], "those": [2, 3, 4, 5, 6], "littl": [2, 3], "green": [2, 5], "dot": 2, "out": [2, 3, 4, 6], "Then": [2, 3], "line": [2, 3], "later": [2, 3, 6], "re": [2, 3, 4, 6], "alreadi": [2, 3], "movi": 2, "end": [2, 3, 4, 6], "theori": [2, 3], "some": [2, 3, 4, 5, 6], "go": [2, 3, 4, 6], "mean": [2, 3, 4, 6], "word": [2, 3, 4, 6], "tepid": 2, "articl": [2, 3, 4, 5], "sure": [2, 3, 4, 6], "lunar": 2, "As": [2, 3, 4, 6], "see": [2, 3, 6], "fail": [2, 3, 5], "coher": [2, 3, 4], "explan": [2, 3, 6], "child": [2, 3], "nonsens": [2, 5], "meander": 2, "unrel": [2, 3, 5], "topic": [2, 3, 4, 6], "simpl": [2, 3, 4, 5, 6], "lack": [2, 3, 5, 6], "clear": [2, 3, 6], "appropri": [2, 3, 4, 5, 6], "young": [2, 3], "instead": [2, 3, 4, 6], "introduc": [2, 3, 4, 6], "rlhf": 2, "intent": [2, 5], "wide": [2, 3, 4, 6], "task": [2, 4, 6], "fig": [2, 3, 4, 5, 6], "collect": [2, 3, 4], "label": [2, 3, 6], "behavior": [2, 3, 5], "comparison": 2, "reward": [2, 3, 5], "sever": [2, 3, 4, 6], "rank": [2, 3], "worst": 2, "rm": 2, "reinforc": [2, 3], "stori": 2, "frog": 2, "calcul": [2, 3], "score": [2, 3, 6], "ppo": 2, "proxim": 2, "iter": [2, 3, 4, 6], "accur": [2, 3, 6], "undesir": [2, 5], "simplifi": [2, 3, 6], "view": [2, 3], "show": [2, 3, 4, 5, 6], "progress": [2, 4, 5], "ha": [2, 3, 5, 6], "instanc": [2, 3, 4, 5], "further": [2, 3, 4, 5, 6], "directli": [2, 3, 6], "guard": 2, "team": [2, 3, 6], "8b": [2, 5], "wa": [2, 3, 6], "classif": [2, 3, 6], "bypass": [2, 5], "similarli": [2, 3, 5], "zephyr": 2, "7b": [2, 3, 5], "alpha": [2, 3, 6], "mistral": [2, 5, 6], "publicli": [2, 3, 6], "assist": [2, 3, 6], "paper": [2, 3, 5, 6], "particular": [2, 3, 5, 6], "foundat": [2, 3, 4], "advanc": [2, 3, 4, 5, 6], "strong": [2, 3, 6], "At": [2, 3, 6], "high": [2, 3, 4, 6], "level": [2, 3, 4, 6], "involv": [2, 3, 5, 6], "carefulli": [2, 3, 5, 6], "curat": [2, 3], "purpos": [2, 3, 6], "exhibit": [2, 3, 5], "domain": [2, 3], "emploi": [2, 3, 6], "prove": [2, 3], "particularli": [2, 3, 4, 5, 6], "valuabl": [2, 3, 6], "scenario": [2, 3, 5, 6], "precis": [2, 3, 6], "style": [2, 3], "tone": 2, "expertis": [2, 3, 5], "medic": [2, 3], "legal": [2, 3], "field": [2, 3, 6], "adher": [2, 3, 4, 6], "guidelin": [2, 3], "servic": [2, 3, 4, 5, 6], "standard": [2, 3, 5], "each": [2, 3, 4, 6], "distinct": [2, 3], "advantag": [2, 3, 4, 6], "full": [2, 3, 6], "weight": [2, 3], "maximum": [2, 3, 4], "lora": 2, "low": [2, 3, 5, 6], "hu": [2, 5], "2021": [2, 3], "small": [2, 3, 6], "matric": 2, "effici": [2, 3, 4, 6], "qlora": 2, "quantiz": 2, "dettmer": 2, "2023": [2, 3, 6], "combin": [2, 3, 4, 6], "memori": [2, 3, 4, 5], "footprint": 2, "even": [2, 3, 4, 5, 6], "modest": 2, "increas": [2, 3, 4, 6], "likelihood": [2, 3], "obtain": [2, 3, 6], "probabl": [2, 3, 6], "hong": [2, 3], "therefor": [2, 3, 5], "unintend": [2, 5], "suboptim": 2, "seen": [2, 3], "been": [2, 3, 5], "maxim": [2, 3], "shown": [2, 3], "alon": [2, 3], "gain": [2, 3], "achiev": [2, 3, 6], "bai": [2, 3], "touvron": 2, "sinc": [2, 3, 4, 5, 6], "main": [2, 3, 4, 6], "categori": [2, 3], "algorithm": [2, 3, 5], "meanwhil": 2, "superior": [2, 3], "xu": [2, 3, 5], "schulman": [2, 5], "2017": [2, 3], "popular": [2, 6], "understood": 2, "rule": [2, 3, 4, 6], "govern": [2, 3, 5], "reflect": [2, 3, 5], "anoth": [2, 3], "adjust": [2, 3, 4, 6], "strength": [2, 3], "2024c": 2, "real": [2, 3, 4, 5, 6], "world": [2, 3, 5, 6], "noisi": 2, "delai": [2, 3], "chatbot": [2, 3, 5], "subsequ": [2, 6], "situat": [2, 3, 4], "clip": 2, "surrog": 2, "function": [2, 3, 4, 6], "stabl": [2, 3], "prevent": [2, 3, 5, 6], "overreact": 2, "converg": 2, "due": [2, 3, 4, 5], "simplic": 2, "award": [2, 3], "runner": 2, "neurip": 2, "blog": [2, 3, 6], "4": [2, 3, 5, 6], "fit": [2, 3, 4, 6], "pair": [2, 3], "rl": 2, "find": [2, 3, 4, 6], "contrast": [2, 3], "satisfi": [2, 3], "implicit": [2, 3, 5], "whose": [2, 3], "correspond": [2, 3, 6], "extract": [2, 3, 4, 5, 6], "close": [2, 3], "against": [2, 3, 5], "assign": [2, 3, 6], "higher": [2, 3], "kl": 2, "diverg": 2, "origin": [2, 3, 4, 6], "preserv": [2, 4], "defin": [2, 3, 4, 5, 6], "equat": 2, "gather": [2, 3], "mathcal": 2, "l": [2, 3], "pi_": 2, "theta": [2, 6], "ref": 2, "mathbb": [2, 6], "x": [2, 3], "y_w": 2, "y_l": 2, "sim": [2, 6], "left": 2, "log": [2, 3], "beta": [2, 3, 6], "underbrac": 2, "frac": 2, "color": [2, 3], "red": [2, 5], "right": [2, 3], "straightforward": [2, 3, 4, 6], "librari": [2, 3, 4, 6], "huggingfac": [2, 3, 5], "trl": 2, "2024d": 2, "suit": [2, 3, 5], "includ": [2, 3, 4, 5, 6], "friendli": [2, 3, 4], "interfac": [2, 3], "featur": [2, 3, 6], "solv": [2, 3, 6], "describ": [2, 3], "assum": [2, 3, 4], "acm": [2, 5], "inc": [2, 3, 4, 6], "dedic": [2, 3, 6], "democrat": [2, 3, 6], "educ": [2, 3, 4], "k": [2, 3, 4, 5, 6], "12": [2, 3, 4], "name": [2, 3, 4, 6], "smolk": 2, "walk": 2, "measur": [2, 3, 5], "huggingfacetb": 2, "360m": [2, 3], "compact": [2, 3], "part": [2, 3, 4, 6], "famili": [2, 6], "publish": [2, 6], "local": [2, 3, 4, 6], "infer": [2, 3, 5], "remot": [2, 3], "load": [2, 3, 4, 6], "eventu": [2, 3], "util": [2, 3, 4], "your_openai_api_kei": 2, "reusabl": 2, "decid": [2, 3, 4], "anchor": 2, "worth": [2, 3], "reason": [2, 3, 4, 6], "lightweight": [2, 3, 6], "suitabl": [2, 3], "devic": [2, 3, 6], "Its": [2, 3], "excel": [2, 3, 6], "candid": [2, 3], "said": [2, 3], "necessarili": [2, 3], "par": [2, 3], "mind": [2, 3], "factual": [2, 3, 5], "inaccuraci": [2, 3], "possibl": [2, 3, 6], "inconsist": [2, 3, 6], "guardrail": 2, "articul": 2, "uphold": 2, "employe": [2, 3], "stakehold": [2, 3], "expect": [2, 3, 4, 6], "regard": [2, 3], "ethic": [2, 3, 5], "conduct": [2, 3], "social": [2, 3, 5], "onli": [2, 3, 4, 5, 6], "mission": 2, "vision": [2, 3], "cultur": [2, 3], "account": [2, 3], "codifi": 2, "action": [2, 3, 4, 5], "establish": [2, 3, 5], "mlcommon": 2, "vidgen": [2, 5], "encompass": 2, "seven": 2, "hazard": [2, 3], "violent": 2, "crime": 2, "sex": 2, "relat": [2, 3], "sexual": 2, "exploit": [2, 3, 5], "indiscrimin": 2, "weapon": [2, 5], "chemic": 2, "biolog": 2, "radiolog": 2, "nuclear": [2, 3], "yield": [2, 3], "explos": 2, "cbrne": 2, "suicid": 2, "hate": [2, 5], "speech": [2, 5], "below": [2, 3, 4, 6], "markdown": [2, 3, 4], "written": [2, 3], "english": [2, 4], "o": [2, 3, 4, 5, 6], "ipython": [2, 3], "displai": [2, 3, 6], "def": [2, 3, 4, 6], "load_polici": 2, "policy_path": 2, "path": [2, 3, 4], "join": [2, 3, 4], "genai_polici": 2, "md": [2, 3, 5, 6], "policy_cont": 2, "return": [2, 3, 4, 6], "classroom": 2, "accept": [2, 3], "unaccept": 2, "ag": [2, 3], "subject": [2, 3], "support": [2, 3, 6], "posit": [2, 3, 4, 6], "confid": [2, 3, 6], "inclus": [2, 3, 4, 6], "celebr": 2, "definit": [2, 3, 6], "creativ": [2, 3, 6], "math": [2, 3], "tip": 2, "digit": [2, 3], "literaci": 2, "onlin": [2, 3, 5], "histor": [2, 3], "violenc": 2, "physic": [2, 3], "fight": 2, "crimin": [2, 5], "illeg": 2, "glorifi": 2, "promot": [2, 3], "person": [2, 3, 5, 6], "eat": 2, "disord": 2, "danger": [2, 5], "diet": 2, "dare": 2, "advic": [2, 3, 5], "discriminatori": [2, 5], "bulli": 2, "harass": [2, 3], "target": [2, 3, 5, 6], "protect": [2, 3, 5], "group": [2, 3, 4], "religi": 2, "racial": [2, 3, 5], "ethnic": 2, "bia": [2, 3, 6], "gender": [2, 3, 5], "discrimin": [2, 3, 5], "adult": 2, "explicit": [2, 3, 6], "profan": 2, "relationship": [2, 3], "substanc": [2, 3], "drug": 2, "gambl": 2, "bet": 2, "protocol": [2, 3], "refus": [2, 6], "redirect": 2, "alert": 2, "necessari": [2, 3, 4], "record": [2, 3], "review": [2, 3, 6], "regular": [2, 3, 6], "audit": [2, 3], "teacher": 2, "parent": 2, "continu": [2, 3, 4, 6], "aim": [2, 3, 4, 6], "indic": [2, 3, 6], "compliant": 2, "violat": [2, 3, 5], "qualiti": [2, 3, 4, 6], "intens": [2, 3, 6], "demand": [2, 3, 5, 6], "especi": [2, 3, 4, 6], "dong": [2, 3], "There": [2, 3, 4, 5, 6], "replac": [2, 3], "rlaif": 2, "give": [2, 3], "rise": [2, 5], "kim": [2, 3, 5], "meta": [2, 3, 4], "wu": [2, 3, 5, 6], "scheme": 2, "inspir": 2, "schema": [2, 6], "row": [2, 3], "match": [2, 3, 6], "ones": 2, "boundari": [2, 3], "craft": [2, 3, 5, 6], "elicit": [2, 6], "unalign": 2, "serv": [2, 3, 4, 6], "panda": [2, 3], "chosen_responses_path": 2, "chosen_respons": 2, "csv": [2, 3], "rejected_responses_path": 2, "rejected_respons": 2, "chosen_responses_jsonl_path": 2, "batch_result": 2, "jsonl": 2, "dpo_dataset_s": 2, "5000": 2, "class": [2, 3, 4, 5, 6], "userpromptgener": 2, "might": [2, 3, 4, 5, 6], "explicitli": [2, 3], "pd": [2, 3], "basemodel": [2, 3, 6], "time": [2, 3, 4, 5, 6], "dotenv": [2, 3, 4, 6], "load_dotenv": [2, 3, 4, 6], "variabl": [2, 3, 4, 6], "overrid": [2, 6], "true": [2, 3, 4, 6], "userprompt": 2, "user_prompt": 2, "str": [2, 3, 4, 6], "__init__": 2, "4o": [2, 3, 4, 6], "mini": [2, 3, 4, 6], "client": [2, 3, 6], "_generate_prompt": 2, "batch": [2, 3], "system_prompt": 2, "complet": [2, 3, 4, 6], "pars": [2, 3, 6], "messag": [2, 3, 6], "response_format": [2, 3, 6], "except": [2, 3, 6], "generate_prompt": 2, "num_prompt": 2, "int": [2, 3, 4], "save_to_csv": 2, "call": [2, 3, 4, 5, 6], "arg": [2, 3, 4, 6], "option": [2, 3, 4, 6], "filepath": 2, "save": [2, 3, 4], "datafram": [2, 3], "all_prompt": 2, "respect": [2, 3], "sleep": 2, "enclos": 2, "quot": [2, 3], "startswith": 2, "els": [2, 3, 4], "df": [2, 3], "column": [2, 3], "to_csv": 2, "fals": [2, 3, 6], "user_prompt_gener": 2, "user_prompts_path": 2, "uneth": 2, "dishonesti": 2, "stalk": 2, "privaci": [2, 3, 6], "secur": [2, 3, 4, 6], "breach": [2, 3], "manipul": [2, 3, 5], "decept": [2, 3], "10": [2, 3, 4, 5, 6], "to_markdown": 2, "tell": 2, "me": [2, 6], "bomb": 2, "hurt": 2, "someon": 2, "caught": 2, "come": [2, 3, 4, 6], "plan": [2, 3, 5, 6], "cheat": 2, "fire": [2, 3], "household": 2, "item": [2, 3], "stunt": 2, "friend": 2, "notic": [2, 3, 6], "heard": 2, "school": 2, "7": [2, 3, 4], "8": [2, 3, 4], "teach": 2, "my": 2, "monei": [2, 3], "video": [2, 3], "game": [2, 3], "9": [2, 3], "convinc": 2, "skip": 2, "troubl": 2, "responsegener": 2, "properli": [2, 3, 6], "hug": [2, 3, 5], "instanti": [2, 3], "otherwis": [2, 3], "connect": [2, 3, 6], "endpoint": 2, "local_gener": 2, "model_nam": [2, 3, 4], "huggingface_model_nam": 2, "remote_gener": 2, "api_url": 2, "cloud_endpoint": 2, "recal": [2, 3], "enhanc": [2, 3, 4, 6], "visit": [2, 3], "ui": [2, 3, 6], "co": [2, 3, 5], "click": 2, "choos": [2, 3], "cpu": 2, "gpu": 2, "meaning": [2, 3, 4, 6], "region": [2, 3], "closest": [2, 3], "locat": [2, 3], "onc": [2, 3, 4, 5], "huggingface_hub": 2, "inferencecli": 2, "tokenizers_parallel": 2, "max_new_token": 2, "none": [2, 3], "generate_respons": [2, 3], "prompts_df": 2, "remov": [2, 3], "strip": [2, 3], "elif": [2, 4], "chat_complet": 2, "max_token": [2, 3], "seed": 2, "42": [2, 3], "append": [2, 3, 4, 6], "results_df": 2, "model_respons": 2, "your_api_url": 2, "user_prompts_df": 2, "read_csv": 2, "iloc": 2, "tolist": 2, "parallelevalu": 2, "taming_util": 2, "modul": [2, 3, 6], "parallel": [2, 3], "so": [2, 3, 6], "num_chunk": 2, "parallel_evalu": 2, "n_part": 2, "associ": [2, 3, 4, 6], "gladli": 2, "constitut": [2, 3], "would": [2, 3, 4, 5, 6], "dtype": [2, 3], "80": [2, 3], "absolut": [2, 3, 6], "materi": [2, 3, 5, 6], "plastic": 2, "food": 2, "lid": 2, "cut": [2, 3, 4], "swath": 2, "wood": 2, "squar": 2, "rectangular": 2, "piec": 2, "place": [2, 3, 6], "insid": [2, 3], "inch": 2, "inspect": [2, 3], "off": [2, 3, 4, 6], "demolit": 2, "scissor": 2, "smash": 2, "smooth": [2, 4], "arrang": [2, 3], "c": [2, 3, 6], "shape": [2, 5], "top": [2, 3, 6], "tuck": 2, "catch": 2, "ani": [2, 3, 4, 6], "hook": 2, "solid": 2, "side": [2, 3], "round": [2, 3], "edg": [2, 3], "outsid": [2, 3], "separ": [2, 3, 4], "sophist": [2, 3, 4, 5], "process_aligned_respons": 2, "strictli": [2, 6], "bound": [2, 3], "openaibatchprocessor": 2, "async": 2, "company_nam": 2, "save_filepath": 2, "dict": [2, 3, 4, 6], "enforc": [2, 3, 5, 6], "dictionari": [2, 3, 6], "aligned_suffix": 2, "sorri": 2, "compli": [2, 3, 6], "suffix": [2, 6], "processor": 2, "api_kei": [2, 3, 4], "getenv": 2, "max_requests_per_minut": 2, "1500": 2, "max_tokens_per_minut": 2, "125000": 2, "await": 2, "process_batch": 2, "total": [2, 3, 4, 6], "total_request": 2, "success": [2, 3, 6], "successful_request": 2, "failed_request": 2, "rate_limit_error": 2, "convert": [2, 3, 6], "fri": 2, "su": 2, "believ": [2, 3, 6], "quote_al": 2, "fall": [2, 3], "deem": [2, 3], "pertain": [2, 3], "point": [2, 3, 4], "generate_dpo_dataset": 2, "push": [2, 3], "hub": [2, 3], "repo_id": 2, "push_to_hub": [2, 3], "dpo_dataset": 2, "merg": [2, 4], "_chosen": 2, "_reject": 2, "transform_row": 2, "per": [2, 3, 4], "model_responses_chosen": 2, "model_responses_reject": 2, "seri": [2, 3], "axi": [2, 3], "drop": [2, 3], "hf_dpo_dataset": 2, "from_panda": 2, "duplic": 2, "interest": [2, 3, 4, 5, 6], "opt": 2, "login": 2, "thatupiso": 2, "smolk12": 2, "cli": [2, 3], "parquet": 2, "arrow": 2, "00": [2, 3], "153": [2, 3], "33ba": 2, "upload": [2, 3], "shard": 2, "02": 2, "35": [2, 3], "num_row": 2, "7158": 2, "nmateri": 2, "n1": [2, 3], "nstep": 2, "n2": [2, 3], "n3": [2, 3], "n4": [2, 3], "n5": [2, 3], "n6": 2, "n7": 2, "n8": [2, 3], "n9": [2, 3], "n10": [2, 3], "nnext": 2, "nthe": [2, 3], "rapid": [2, 3, 5], "singl": [2, 3, 4, 6], "48gb": 2, "a100": 2, "took": 2, "few": [2, 3, 4, 6], "minut": 2, "torch": 2, "h4": 2, "2024b": 2, "honest": [2, 3], "harmless": 2, "ultrafeedback": 2, "binar": 2, "lib": 2, "ultrafeedback_binar": 2, "2024a": 2, "criteria": [2, 3], "honesti": 2, "dimens": [2, 3], "blend": 2, "automodelforcausallm": 2, "autotoken": 2, "load_dataset": 2, "dpotrain": 2, "dpoconfig": 2, "dataset_k12": 2, "split": [2, 3, 4], "dataset_ultra": 2, "concatenate_dataset": 2, "remove_column": 2, "score_chosen": 2, "score_reject": 2, "shuffl": 2, "base_model": 2, "cuda": 2, "is_avail": 2, "mp": 2, "from_pretrain": 2, "pretrained_model_name_or_path": 2, "torch_dtyp": 2, "float32": 2, "config": [2, 3], "use_cach": 2, "pad_token": 2, "eos_token": 2, "finetun": 2, "finetune_nam": 2, "aligned_model": 2, "finetune_tag": 2, "from_smollm2": 2, "schedul": [2, 3], "learning_r": 2, "determin": [2, 3, 4, 6], "aggress": [2, 3], "empir": 2, "1e": [2, 4], "huyen": 2, "cosin": 2, "lr_scheduler_typ": 2, "stabil": [2, 3, 5], "gradual": 2, "decreas": [2, 3], "gradient": [2, 3], "accumul": [2, 3], "natur": [2, 3, 4, 6], "v": [2, 6], "16": [2, 3], "per_device_train_batch_s": 2, "simul": [2, 3, 6], "gradient_accumulation_step": 2, "strongli": [2, 6], "lower": [2, 3, 6], "conserv": 2, "overfit": 2, "warmup": 2, "max_step": 2, "1000": [2, 3], "often": [2, 3, 4, 5, 6], "suffic": 2, "20": [2, 3, 6], "warmup_step": 2, "stop": [2, 3, 4], "mix": [2, 3, 6], "bf16": 2, "checkpoint": 2, "gradient_checkpoint": 2, "200": [2, 3], "50": [2, 3], "training_results_dir": 2, "smolk12_dpo_output": 2, "dpo_config_path": 2, "dpo_config": 2, "yaml": [2, 3, 6], "pathlib": 2, "config_path": 2, "safe_load": [2, 3], "runtim": 2, "hub_model_id": 2, "use_mps_devic": 2, "output_dir": [2, 3], "training_arg": 2, "trainer": 2, "train_dataset": 2, "processing_class": 2, "max_prompt_length": 2, "1024": 2, "max_length": [2, 3, 6], "1536": 2, "sent": 2, "plot": [2, 3], "move": [2, 3, 4, 5], "averag": [2, 3, 6], "visual": [2, 3, 5], "distinguish": [2, 3], "dure": [2, 3, 5, 6], "bad": [2, 5], "reveal": [2, 3], "phase": [2, 3], "quick": [2, 3], "150": [2, 3], "curv": 2, "reach": [2, 3, 4, 6], "obviou": 2, "warrant": 2, "suffici": [2, 3, 6], "nuanc": [2, 3, 4, 6], "save_model": 2, "hf_token": 2, "tag": 2, "congratul": 2, "successfulli": [2, 3, 6], "card": [2, 3], "newli": 2, "u": [2, 3, 6], "qualit": [2, 3], "assess": [2, 3, 4], "rigor": [2, 3, 5], "quantit": [2, 3], "base_gener": 2, "aligned_gener": 2, "compare_model_respons": 2, "base_output": 2, "128": [2, 3], "aligned_output": 2, "pleas": [2, 3, 5], "gram": [2, 3], "tnt": 2, "highli": [2, 3, 5, 6], "regul": [2, 3, 5, 6], "law": [2, 3, 5], "degre": [2, 3], "mishandl": 2, "countri": [2, 3], "seriou": [2, 3, 5], "consequ": [2, 3, 5, 6], "imprison": 2, "death": 2, "variou": [2, 3, 4, 5, 6], "intern": [2, 3, 5], "nation": 2, "dictat": 2, "stark": [2, 3], "readili": [2, 3], "detail": [2, 3, 4, 6], "cite": 2, "concern": [2, 3, 5], "regulatori": [2, 3], "anecdot": 2, "evid": [2, 3, 6], "systemat": [2, 3, 5, 6], "quantifi": [2, 3, 5], "accuraci": [2, 3, 6], "f1": [2, 3], "experienc": [2, 3], "expert": [2, 3, 4, 6], "addition": [2, 3], "vari": [2, 3], "interpret": [2, 3], "adopt": [2, 3, 6], "judg": [2, 3], "act": [2, 3, 5], "summar": [2, 3, 4], "three": [2, 3], "togeth": [2, 4], "queri": [2, 3], "entri": [2, 3], "somewhat": 2, "databas": [2, 3, 6], "distribut": [2, 3, 6], "static": 2, "k12": 2, "base_model_api_url": 2, "aligned_model_api_url": 2, "base_model_responses_path": 2, "evals_base_model_respons": 2, "aligned_model_responses_path": 2, "evals_aligned_model_respons": 2, "num_sampl": 2, "previous": [2, 3, 4, 6], "eval_dataset": 2, "df_eval": 2, "to_panda": [2, 3], "lambda": 2, "prompts_ev": 2, "to_list": 2, "base_model_respons": 2, "aligned_model_respons": 2, "df_eval_respons": 2, "_base": 2, "_align": 2, "rememb": [2, 3], "heurist": 2, "charact": [2, 3, 4, 6], "longer": [2, 3], "minimum": [2, 3], "min_response_length": 2, "filter": [2, 3, 6], "string": [2, 3, 6], "df_eval_responses_clean": 2, "model_responses_bas": 2, "model_responses_align": 2, "homemad": 2, "kid": 2, "redact": 2, "punish": 2, "unit": [2, 3, 4, 6], "indonesia": 2, "saudi": 2, "arabia": 2, "attempt": [2, 3, 4], "offens": 2, "respond": [2, 3, 5], "rodrig": 2, "safetyjudg": 2, "evaluate_respons": 2, "condit": [2, 3], "tupl": [2, 3], "safetyscor": 2, "float": [2, 3, 4], "valueerror": [2, 6], "empti": 2, "scoring_guid": 2, "nrespons": 2, "safety_judg": 2, "test_respons": 2, "highlight": [2, 3, 4, 6], "emphas": [2, 3, 5, 6], "emphasi": [2, 3], "base_ev": 2, "zip": [2, 3], "aligned_ev": 2, "pose": [2, 3, 4, 5, 6], "injuri": [2, 3], "base_scor": 2, "eval": 2, "aligned_scor": 2, "base_df": 2, "aligned_df": 2, "model_typ": 2, "stack": 2, "evals_df_result": 2, "h": [2, 3, 5], "identifi": [2, 3, 4, 5, 6], "requ": 2, "statist": [2, 3], "naiv": [2, 4], "map": [2, 3, 6], "score_map": 2, "Not": [2, 3], "count": [2, 3, 4], "percentag": [2, 3], "score_base_freq": 2, "score_bas": 2, "value_count": 2, "reindex": 2, "fill_valu": 2, "score_base_pct": 2, "score_aligned_freq": 2, "score_align": 2, "score_aligned_pct": 2, "tabl": [2, 3, 4, 6], "md_tabl": 2, "335": [2, 3], "99": 2, "281": [2, 3], "83": [2, 3], "14": [2, 3, 6], "43": [2, 3], "explanation_bas": 2, "response_bas": 2, "model_type_bas": 2, "explanation_align": 2, "response_align": 2, "model_type_align": 2, "std": [2, 3], "base_mean": 2, "aligned_mean": 2, "3f": 2, "108": [2, 3], "231": [2, 3], "remain": [2, 3, 4, 5], "were": [2, 3, 6], "No": [2, 3, 6], "fell": 2, "partial": [2, 3, 4], "styliz": 2, "don": [2, 3, 4, 6], "wild": 2, "doe": [2, 3, 4, 6], "proof": 2, "taken": [2, 3, 6], "huang": [2, 3, 5], "overal": [2, 3, 4, 6], "reli": [2, 3], "annot": [2, 3], "scarc": 2, "recogn": [2, 3, 5], "mirror": [2, 3], "inaccur": [2, 3, 5, 6], "consecut": 2, "mitig": [2, 3, 4, 5, 6], "unrepres": 2, "hao": [2, 3], "accord": [2, 3, 6], "yin": 2, "resembl": 2, "declin": [2, 3], "volatil": [2, 3], "ineffici": [2, 3], "smollm": 2, "rel": [2, 3], "term": [2, 3, 4], "trade": [2, 3, 6], "weigh": 2, "altern": [2, 3, 4], "qwen": [2, 6], "remark": [2, 6], "rival": 2, "though": [2, 3, 6], "ultim": [2, 3], "threshold": [2, 3], "chen": [2, 3, 5, 6], "overli": [2, 3, 6], "fact": [2, 3], "simpli": [2, 3, 4, 6], "aspect": [2, 3, 4, 6], "neglect": [2, 3], "themselv": [2, 3], "actual": [2, 3, 4, 6], "complementari": 2, "throughput": 2, "screen": [2, 3], "flag": [2, 3], "preliminari": [2, 3], "relev": [2, 3], "judgment": [2, 3], "automat": [2, 3], "composit": [2, 3], "plai": [2, 3, 5, 6], "led": [2, 3, 6], "apologet": 2, "hesit": 2, "benign": 2, "apolog": 2, "inde": 2, "accordingli": [2, 3], "perhap": 2, "creation": [2, 4, 5], "invalu": 2, "factor": [2, 3, 4, 6], "hyperparamet": 2, "mention": [2, 3, 6], "significantli": [2, 3, 4], "optimist": 2, "memor": [2, 3], "generaliz": 2, "bjn": 2, "22": [2, 3, 5], "yuntao": [2, 3], "andi": [2, 3], "jone": [2, 3], "kamal": 2, "ndouss": 2, "amanda": [2, 3], "askel": [2, 3], "anna": [2, 3, 5], "nova": 2, "dassarma": 2, "dawn": [2, 3, 5], "drain": 2, "stanislav": 2, "fort": [2, 5], "deep": [2, 3, 6], "ganguli": [2, 3], "tom": [2, 3], "henighan": 2, "nichola": [2, 3], "joseph": [2, 3], "saurav": 2, "kadavath": 2, "jackson": [2, 3], "kernion": [2, 3], "conerli": 2, "sheer": [2, 6], "el": 2, "showk": 2, "nelson": 2, "elhag": 2, "zac": 2, "hatfield": 2, "dodd": 2, "danni": [2, 3], "hernandez": [2, 3], "tristan": 2, "hume": 2, "scott": [2, 3, 5], "johnston": 2, "shauna": 2, "kravec": 2, "lian": 2, "lovitt": 2, "neel": [2, 3], "nanda": 2, "catherin": [2, 3], "olsson": 2, "dario": [2, 3], "amodei": [2, 3], "brown": [2, 3], "jack": [2, 3, 5], "clark": 2, "sam": [2, 3, 5], "mccandlish": [2, 3], "chri": [2, 3, 5], "olah": 2, "ben": [2, 3, 5], "mann": 2, "jare": [2, 3], "kaplan": [2, 3], "arxiv": [2, 3, 5, 6], "org": [2, 3, 5, 6], "ab": [2, 3, 5, 6], "2204": 2, "05862": 2, "bkk": 2, "sandipan": 2, "kundu": 2, "goldi": 2, "azalia": 2, "mirhoseini": 2, "cameron": [2, 3, 5, 6], "mckinnon": 2, "carol": 2, "christoph": [2, 3, 5], "dustin": 2, "eli": [2, 3, 5], "tran": [2, 6], "johnson": 2, "ethan": [2, 3], "perez": 2, "jami": [2, 5], "kerr": 2, "mueller": 2, "jeffrei": 2, "ladish": 2, "joshua": [2, 3, 5], "landau": 2, "kamil": [2, 3], "lukosuit": 2, "michael": [2, 3, 5, 6], "sellitto": 2, "schiefer": 2, "noemi": 2, "mercado": 2, "robert": [2, 3], "lasenbi": 2, "robin": 2, "larson": 2, "ringer": 2, "tamera": 2, "lanham": 2, "timothi": [2, 3], "telleen": 2, "lawton": 2, "samuel": [2, 3, 5], "bowman": [2, 3], "2212": 2, "08073": 2, "blo23": 2, "announc": [2, 3], "cc": 2, "11": [2, 3], "ccl": 2, "24": [2, 3, 5, 6], "guim": 2, "hardi": 2, "shunian": 2, "zich": 2, "liu": [2, 3, 5, 6], "feng": [2, 5], "jiang": [2, 3, 5], "benyou": 2, "wang": [2, 3, 5], "judgement": 2, "2402": 2, "10669": 2, "dphz23": 2, "tim": [2, 5], "artidoro": 2, "pagnoni": 2, "ari": [2, 3, 5], "holtzman": [2, 3], "luke": [2, 3, 5], "zettlemoy": 2, "2305": 2, "14314": 2, "ddz": 2, "qingxiu": 2, "xingx": 2, "zhang": [2, 3, 5], "zhifang": 2, "sui": 2, "furu": 2, "wei": [2, 3, 5], "boost": 2, "2410": [2, 5], "06961": 2, "fac24": [2, 3], "huggingfaceh4": 2, "fac4c": 2, "fac4d": 2, "doc": [2, 3, 4, 6], "en": [2, 3, 5, 6], "h44a": 2, "binari": [2, 3], "h44b": 2, "hhj": 2, "shuang": 2, "wenfeng": 2, "han": [2, 3, 5], "tao": [2, 3, 5], "yipe": 2, "haonan": 2, "chunlin": 2, "zhong": [2, 5], "zhangjun": 2, "zhou": [2, 3, 5], "tang": [2, 3, 5], "2401": [2, 3], "01629": 2, "hlt24": 2, "jiwoo": 2, "noah": [2, 3, 5], "lee": [2, 3, 5, 6], "jame": [2, 3], "thorn": 2, "orpo": 2, "monolith": 2, "2403": [2, 3], "07691": 2, "hsw": 2, "21": [2, 3], "edward": [2, 3], "j": [2, 3, 6], "yelong": 2, "shen": [2, 3], "phillip": 2, "walli": 2, "zeyuan": 2, "allen": [2, 3], "zhu": [2, 3], "yuanzhi": 2, "shean": 2, "lu": [2, 3], "weizhu": 2, "2106": 2, "09685": 2, "hgh": 2, "jiaxin": 2, "shixiang": [2, 3, 5], "shane": [2, 3, 5], "gu": [2, 3, 5], "le": [2, 3], "hou": [2, 3], "yuexin": 2, "xuezhi": 2, "hongkun": 2, "yu": [2, 3, 5], "jiawei": 2, "2210": 2, "11610": 2, "huy24": 2, "chip": 2, "reilli": 2, "media": [2, 3], "decemb": [2, 3], "isbn": [2, 3], "9781098129095": 2, "www": [2, 3, 5], "oreilli": 2, "ksy": 2, "seungon": 2, "juyoung": 2, "suk": 2, "xiang": [2, 3], "yue": 2, "vijai": 2, "viswanathan": 2, "seongyun": 2, "yizhong": 2, "kiril": 2, "gashteovski": 2, "carolin": [2, 5], "lawrenc": 2, "sean": [2, 3], "welleck": 2, "graham": 2, "neubig": 2, "2412": 2, "03679": 2, "lt24": 2, "herd": 2, "2407": [2, 3], "21783": 2, "lwx": 2, "lin": [2, 3, 5, 6], "rui": [2, 3, 6], "ruixuan": 2, "xiao": [2, 5], "junbo": 2, "zhao": [2, 3, 5], "ding": 2, "gang": 2, "haobo": 2, "driven": [2, 3, 5], "survei": [2, 3, 5, 6], "2406": [2, 3], "15126": 2, "met24": 2, "owj": 2, "jeff": [2, 3, 5], "diogo": [2, 5], "almeida": [2, 5], "carrol": [2, 5], "wainwright": [2, 5], "pamela": [2, 3, 5], "mishkin": [2, 3, 5], "chong": [2, 5], "sandhini": [2, 5], "agarw": [2, 3, 5], "katarina": [2, 5], "slama": [2, 5], "alex": [2, 3, 5], "rai": [2, 3, 5], "john": [2, 3, 5], "jacob": [2, 3, 5], "hilton": [2, 3], "fraser": 2, "kelton": 2, "miller": [2, 3], "maddi": [2, 5], "simen": [2, 5], "peter": [2, 3, 5], "welind": [2, 3, 5], "paul": [2, 3, 5], "christiano": 2, "jan": [2, 3, 5], "leik": [2, 3, 5], "ryan": [2, 3, 5], "2203": 2, "02155": 2, "qwe24": 2, "rsm": 2, "rafael": 2, "archit": 2, "sharma": 2, "eric": [2, 3, 5], "mitchel": 2, "stefano": [2, 3], "ermon": [2, 3], "man": [2, 3, 5], "chelsea": [2, 5], "finn": 2, "secretli": 2, "18290": 2, "swd": 2, "17": [2, 3], "filip": [2, 5], "wolski": 2, "prafulla": 2, "dhariw": 2, "alec": [2, 3, 5], "radford": [2, 3, 5], "oleg": [2, 5], "klimov": 2, "1707": 2, "06347": 2, "smollm224": 2, "distil": 2, "post": [2, 3, 6], "smollm2360mi24": 2, "sou24": 2, "html": [2, 4, 6], "tm": 2, "23": [2, 3], "hugo": 2, "loui": [2, 3], "martin": [2, 3, 5], "kevin": [2, 3, 5], "stone": 2, "albert": 2, "amjad": 2, "almahairi": 2, "yasmin": 2, "babaei": 2, "nikolai": 2, "bashlykov": 2, "soumya": 2, "batra": 2, "prajjwal": 2, "bhargava": 2, "shruti": 2, "bhosal": 2, "dan": [2, 3], "bikel": 2, "luka": 2, "blecher": 2, "cristian": 2, "canton": 2, "ferrer": 2, "moya": 2, "guillem": 2, "cucurul": 2, "david": [2, 3, 5], "esiobu": 2, "jude": 2, "fernand": 2, "jeremi": [2, 3], "fu": 2, "wenyin": 2, "brian": 2, "fuller": 2, "cynthia": 2, "gao": [2, 3, 5], "vedanuj": 2, "goswami": 2, "naman": 2, "goyal": 2, "anthoni": 2, "hartshorn": 2, "saghar": 2, "hosseini": 2, "hakan": 2, "inan": 2, "marcin": 2, "karda": 2, "viktor": 2, "kerkez": 2, "madian": 2, "khabsa": 2, "isabel": [2, 5], "kloumann": 2, "artem": 2, "korenev": 2, "punit": 2, "singh": [2, 3], "koura": 2, "mari": [2, 3], "ann": 2, "lachaux": 2, "thibaut": 2, "lavril": 2, "jenya": 2, "diana": [2, 3], "liskovich": 2, "yinghai": 2, "yune": 2, "mao": 2, "xavier": 2, "martinet": 2, "todor": [2, 5], "mihaylov": 2, "pushkar": 2, "mishra": [2, 3], "igor": [2, 3, 5], "molybog": 2, "yixin": 2, "nie": [2, 3], "andrew": [2, 3, 5], "poulton": 2, "reizenstein": 2, "rashi": 2, "rungta": 2, "kalyan": 2, "saladi": 2, "alan": [2, 5], "schelten": 2, "ruan": 2, "silva": 2, "smith": [2, 3], "ranjan": 2, "subramanian": 2, "xiaoq": 2, "ellen": 2, "tan": [2, 3], "binh": 2, "ross": [2, 5], "taylor": 2, "adina": 2, "william": [2, 3, 5], "jian": [2, 3], "kuan": 2, "puxin": 2, "zheng": [2, 3, 5], "yan": [2, 3], "iliyan": 2, "zarov": 2, "yuchen": [2, 3, 5], "angela": [2, 3, 5], "fan": [2, 3], "melani": 2, "kambadur": 2, "sharan": 2, "narang": 2, "aurelien": 2, "rodriguez": 2, "stojnic": 2, "sergei": 2, "edunov": 2, "thoma": [2, 3, 5], "scialom": 2, "2307": [2, 6], "09288": 2, "vaa": 2, "berti": [2, 5], "adarsh": 2, "agraw": 2, "ahm": [2, 5], "victor": 2, "akinwand": 2, "namir": 2, "nuaimi": 2, "najla": 2, "alfaraj": 2, "alhajjar": 2, "aroyo": 2, "trupti": 2, "bavalatti": 2, "max": [2, 3], "bartolo": 2, "borhan": 2, "blili": 2, "hamelin": 2, "kurt": 2, "bollack": 2, "rishi": [2, 3], "bomassani": 2, "marisa": 2, "ferrara": 2, "boston": 2, "sim\u00e9on": 2, "campo": 2, "kal": 2, "chakra": 2, "canyu": 2, "codi": 2, "coleman": 2, "zachari": [2, 3], "delpierr": 2, "coudert": 2, "leon": 2, "derczynski": 2, "debojyoti": 2, "dutta": 2, "ian": [2, 3, 5], "eisenberg": 2, "ezick": 2, "heather": [2, 5], "frase": 2, "ram": 2, "gandikota": 2, "agasthya": 2, "gangavarapu": 2, "ananya": [2, 3], "geali": 2, "rajat": 2, "ghosh": [2, 3], "goel": [2, 5], "usman": 2, "gohar": 2, "sujata": 2, "hale": [2, 5], "wiebk": 2, "hutiri": 2, "marvin": [2, 5], "imperi": 2, "surgan": 2, "jandial": 2, "nick": [2, 3, 5], "judd": 2, "felix": [2, 3, 5], "juefei": 2, "fouts": 2, "khomh": 2, "bhavya": 2, "kailkhura": 2, "hannah": [2, 3, 5], "rose": [2, 5], "kirk": [2, 5], "klyman": 2, "knotz": 2, "kuchnik": 2, "shachi": 2, "kumar": [2, 3], "srijan": 2, "lengerich": 2, "bo": [2, 3, 5], "zeyi": 2, "liao": [2, 3], "eileen": 2, "sarah": [2, 3, 5], "luger": 2, "yifan": [2, 3], "priyanka": 2, "mammen": 2, "kelvin": 2, "manyeki": 2, "mcgregor": 2, "virendra": 2, "mehta": [2, 3, 5], "shafe": 2, "moham": 2, "emanuel": [2, 3], "moss": [2, 5], "lama": [2, 5], "nachman": 2, "dinesh": 2, "jinenh": 2, "naganna": 2, "amin": [2, 5], "nikanjam": 2, "besmira": 2, "nushi": 2, "lui": [2, 3], "oala": 2, "iftach": 2, "orr": [2, 3], "alicia": [2, 3], "parrish": [2, 3], "cigdem": 2, "patlak": 2, "pietri": 2, "forough": 2, "poursabzi": 2, "sangdeh": 2, "eleonora": 2, "presani": 2, "fabrizio": 2, "puletti": 2, "r\u00f6ttger": [2, 5], "sahai": 2, "santo": 2, "nino": [2, 5], "scherrer": [2, 5], "alic": [2, 3, 6], "schoenauer": 2, "sebag": 2, "patrick": 2, "schramowski": 2, "abolfazl": 2, "shahbazi": 2, "vin": 2, "xudong": [2, 3], "vamsi": 2, "sistla": 2, "leonard": 2, "testuggin": 2, "vithursan": 2, "thangarasa": 2, "elizabeth": [2, 3, 5], "watkin": 2, "rebecca": [2, 5], "weiss": 2, "welti": 2, "tyler": [2, 3], "wilber": 2, "jean": 2, "poonam": 2, "yadav": 2, "xianjun": 2, "yang": [2, 3, 5], "yi": [2, 3, 5, 6], "zeng": 2, "wenhui": 2, "fedor": 2, "zhdanov": 2, "jiacheng": [2, 3], "perci": [2, 3], "liang": [2, 3], "mattson": 2, "joaquin": 2, "vanschoren": 2, "v0": 2, "2404": [2, 3], "12241": 2, "wyg": 2, "tianhao": [2, 3, 5], "weizh": 2, "yuan": [2, 3, 5], "olga": 2, "golovneva": 2, "jing": 2, "yuandong": 2, "tian": 2, "jiantao": 2, "jiao": 2, "jason": [2, 3, 5], "weston": 2, "sainbayar": 2, "sukhbaatar": 2, "19594": 2, "xfg": 2, "shusheng": 2, "jiaxuan": 2, "wenji": 2, "ye": [2, 3, 5, 6], "weilin": 2, "zhiyu": 2, "mei": [2, 3], "guangju": 2, "chao": 2, "10719": 2, "ywx": 2, "yueqin": 2, "zhendong": 2, "yujia": 2, "xie": [2, 3], "mingyuan": 2, "paradigm": [2, 3], "semanticscholar": 2, "corpusid": 2, "270199610": 2, "doesn": [3, 4, 6], "matter": 3, "beauti": 3, "smart": 3, "agre": 3, "wrong": 3, "richard": [3, 5], "feynman": 3, "advent": 3, "shift": 3, "norm": 3, "realm": 3, "convent": [3, 5], "mere": 3, "evolut": 3, "conceiv": 3, "entrench": 3, "seem": [3, 6], "daunt": 3, "ignor": 3, "relianc": 3, "outdat": [3, 6], "probabilist": 3, "inevit": 3, "setback": 3, "imper": 3, "embrac": 3, "proactiv": [3, 5], "mindset": 3, "front": 3, "produc": [3, 5, 6], "novel": 3, "data": [3, 4, 6], "respons": [3, 4, 5, 6], "ident": 3, "isn": 3, "bug": 3, "random": [3, 5, 6], "testabl": 3, "exceedingli": 3, "complianc": [3, 6], "guarante": [3, 6], "user": [3, 4, 5], "trust": [3, 6], "affect": 3, "primari": 3, "nucleu": 3, "2020": 3, "summari": [3, 5, 6], "alter": 3, "rigid": 3, "wildli": 3, "incoher": 3, "inadequ": [3, 5], "temp": 3, "df_result": 3, "ntemperatur": 3, "40": 3, "temp_respons": 3, "iterrow": 3, "10000": [3, 4, 6], "appl": [3, 4, 6], "sec_fil": [3, 6], "nsecur": 3, "AND": [3, 6], "exchang": [3, 4, 6], "commiss": [3, 4, 6], "nwashington": 3, "20549": 3, "nform": 3, "annual": [3, 5], "pursuant": 3, "TO": 3, "13": 3, "OR": 3, "OF": 3, "THE": 3, "1934": 3, "nfor": 3, "fiscal": [3, 4], "septemb": [3, 4], "28": [3, 4], "nor": 3, "period": [3, 4], "ncommiss": 3, "001": 3, "36743": 3, "ng66145g66i43": 3, "jpg": 3, "nappl": 3, "exact": [3, 5], "registr": 3, "specifi": [3, 4, 6], "charter": 3, "ncalifornia": 3, "t94": 3, "2404110": 3, "jurisdict": 3, "nof": 3, "incorpor": [3, 5], "employ": 3, "identif": 3, "park": 3, "ncupertino": 3, "california": [3, 5, 6], "n95014": 3, "princip": 3, "offic": 3, "408": 3, "996": 3, "1010": 3, "telephon": 3, "area": [3, 5, 6], "regist": 3, "ntitl": 3, "ttrade": 3, "symbol": 3, "tname": 3, "ncommon": 3, "stock": [3, 6], "00001": 3, "naapl": 3, "tthe": 3, "nasdaq": [3, 6], "market": [3, 4, 6], "llc": [3, 6], "n0": 3, "000": [3, 6], "2025": 3, "875": 3, "625": 3, "2026": 3, "2027": 3, "375": 3, "2029": 3, "050": 3, "2031": 3, "600": 3, "2042": 3, "nindic": 3, "season": 3, "issuer": 3, "405": 3, "nye": 3, "preced": 3, "shorter": 3, "past": [3, 5], "90": 3, "submit": 3, "electron": 3, "232": 3, "acceler": 3, "filer": 3, "growth": 3, "12b": [3, 5], "nlarg": 3, "tacceler": 3, "nnon": 3, "tsmaller": 3, "nemerg": 3, "nif": 3, "elect": 3, "revis": 3, "attest": 3, "404": 3, "sarban": 3, "oxlei": 3, "7262": 3, "firm": 3, "prepar": [3, 4], "correct": [3, 6], "restat": 3, "recoveri": 3, "incent": 3, "compens": 3, "240": 3, "10d": 3, "shell": 3, "aggreg": 3, "vote": 3, "held": [3, 6], "affili": [3, 6], "march": [3, 6], "29": [3, 6], "last": [3, 4, 6], "second": [3, 4], "quarter": 3, "approxim": [3, 6], "628": [3, 6], "553": [3, 6], "sole": 3, "disclosur": 3, "director": 3, "date": [3, 6], "exclud": 3, "n15": 3, "115": [3, 6], "823": [3, 6], "outstand": [3, 6], "octob": [3, 6], "18": [3, 6], "ndocument": 3, "BY": 3, "nportion": 3, "proxi": 3, "meet": [3, 5, 6], "sharehold": 3, "iii": 3, "120": 3, "ntabl": 3, "npage": 3, "npart": 3, "nitem": 3, "nbusi": 3, "1a": 3, "nrisk": 3, "1b": 3, "nunresolv": 3, "staff": 3, "comment": 3, "n17": 3, "1c": 3, "ncybersecur": 3, "nproperti": 3, "n18": 3, "nlegal": 3, "proceed": [3, 5], "nmine": 3, "ii": [3, 6], "nmarket": 3, "stockhold": 3, "purchas": 3, "n19": 3, "reserv": 3, "n20": 3, "nmanag": 3, "n21": 3, "7a": 3, "nquantit": 3, "n27": 3, "nfinanci": 3, "supplementari": 3, "n28": 3, "nchang": 3, "disagr": 3, "n51": 3, "9a": 3, "ncontrol": 3, "procedur": 3, "9b": 3, "nother": 3, "n52": 3, "9c": 3, "ndisclosur": 3, "foreign": 3, "ndirector": 3, "corpor": 3, "nexecut": 3, "ownership": 3, "certain": [3, 4, 6], "benefici": 3, "owner": 3, "ncertain": 3, "transact": [3, 5], "nprincip": 3, "fee": 3, "iv": 3, "nexhibit": 3, "n53": 3, "n56": 3, "nthi": 3, "forward": 3, "litig": 3, "reform": 3, "1995": 3, "uncertainti": 3, "event": 3, "assumpt": 3, "macroeconom": 3, "anticip": [3, 5], "caus": [3, 5], "oblig": [3, 4], "nunless": 3, "herein": 3, "calendar": 3, "wholli": 3, "subsidiari": 3, "unless": 3, "ncompani": 3, "manufactur": 3, "smartphon": 3, "tablet": 3, "wearabl": [3, 6], "accessori": 3, "sell": 3, "varieti": 3, "52": 3, "53": 3, "week": 3, "saturdai": 3, "nproduct": 3, "niphon": 3, "io": [3, 6], "iphon": [3, 6], "pro": [3, 4], "se": 3, "nmac": 3, "maco": 3, "mac": [3, 6], "laptop": 3, "macbook": 3, "air": 3, "desktop": 3, "imac": 3, "studio": 3, "nipad": 3, "multipurpos": 3, "ipado": 3, "ipad": [3, 6], "nwearabl": 3, "home": 3, "smartwatch": 3, "wireless": 3, "headphon": 3, "spatial": 3, "watcho": 3, "watch": 3, "ultra": 3, "airpod": 3, "beat": 3, "visiono": 3, "nhome": 3, "tv": 3, "stream": [3, 6], "tvo": 3, "homepod": 3, "fidel": [3, 6], "naccessori": 3, "brand": 3, "third": 3, "parti": 3, "nservic": 3, "nadvertis": 3, "advertis": 3, "licens": 3, "napplecar": 3, "portfolio": [3, 6], "applecar": 3, "prioriti": 3, "network": [3, 6], "repair": 3, "addit": [3, 4, 6], "coverag": 3, "accident": 3, "damag": [3, 5], "theft": 3, "loss": [3, 5], "ncloud": 3, "ndigit": 3, "app": 3, "discov": 3, "download": 3, "music": 3, "podcast": 3, "subscript": 3, "arcad": 3, "sm": 3, "listen": 3, "radio": 3, "station": 3, "magazin": 3, "exclus": 3, "sport": 3, "npayment": 3, "payment": 3, "credit": 3, "pai": 3, "cashless": 3, "nsegment": 3, "primarili": 3, "geograph": 3, "basi": 3, "segment": [3, 4, 6], "america": 3, "europ": 3, "china": 3, "japan": 3, "rest": 3, "asia": 3, "pacif": 3, "north": 3, "south": 3, "european": 3, "india": 3, "middl": 3, "east": 3, "africa": 3, "mainland": 3, "kong": 3, "taiwan": 3, "australia": 3, "asian": 3, "although": 3, "partner": 3, "mid": [3, 4], "enterpris": [3, 6], "resel": 3, "retail": 3, "sale": 3, "indirect": 3, "channel": 3, "cellular": 3, "carrier": 3, "net": [3, 6], "38": 3, "62": 3, "ncompetit": 3, "competit": [3, 5], "character": 3, "price": 3, "downward": 3, "pressur": [3, 5], "gross": [3, 5], "margin": [3, 6], "life": [3, 5], "cycl": 3, "industri": [3, 6], "characterist": 3, "competitor": 3, "compet": 3, "imit": 3, "infring": 3, "intellectu": 3, "innov": [3, 4], "marketplac": 3, "nearli": 3, "reput": 3, "expand": 3, "opportun": [3, 5], "substanti": 3, "broader": 3, "illegitim": 3, "collabor": 3, "nsuppli": 3, "nalthough": 3, "essenti": [3, 4, 5, 6], "particip": 3, "shortag": 3, "commod": 3, "fluctuat": 3, "commonli": 3, "capac": 3, "until": [3, 6], "supplier": 3, "matur": 3, "concentr": 3, "enter": 3, "agreement": 3, "suppli": [3, 6], "renew": 3, "nresearch": 3, "nbecaus": 3, "upon": [3, 4, 5], "flow": [3, 4], "acquisit": 3, "nintellectu": 3, "broad": [3, 6], "patent": 3, "copyright": 3, "trademark": 3, "secret": 3, "differenti": 3, "skill": 3, "personnel": 3, "regularli": 3, "aris": [3, 5], "pursu": [3, 5], "thousand": 3, "durat": 3, "adequ": 3, "nin": 3, "holidai": [3, 5], "fill": 3, "inventori": 3, "older": 3, "newer": 3, "distributor": 3, "nhuman": 3, "capit": [3, 4, 6], "strive": 3, "retain": [3, 4], "talent": 3, "member": 3, "164": 3, "equival": 3, "ncompens": 3, "benefit": [3, 6], "equit": 3, "thrive": [3, 6], "succe": 3, "health": 3, "awai": 3, "ngrowth": 3, "career": 3, "leadership": 3, "influenc": [3, 6], "nworkplac": 3, "equal": 3, "workplac": 3, "ninclus": 3, "sustain": 3, "workforc": 3, "represent": [3, 4], "nengag": 3, "among": 3, "gaug": 3, "sentiment": [3, 6], "nhealth": 3, "everywher": 3, "crisi": 3, "put": 3, "visitor": 3, "navail": 3, "quarterli": 3, "q": 3, "amend": 3, "sec": [3, 4, 6], "Such": [3, 5], "charg": 3, "investor": [3, 6], "aspx": 3, "websit": 3, "press": 3, "environment": [3, 5], "referenc": 3, "inact": 3, "textual": 3, "unknown": 3, "advers": 3, "trend": [3, 6], "conjunct": 3, "consolid": 3, "accompani": 3, "nmacroeconom": 3, "econom": 3, "chain": [3, 4], "facil": 3, "assembli": 3, "site": 3, "nadvers": 3, "slow": 3, "recess": 3, "unemploy": 3, "inflat": 3, "tighter": 3, "currenc": 3, "spend": 3, "monetari": 3, "asset": 3, "contract": 3, "logist": 3, "instabl": [3, 5], "inabl": 3, "financ": 3, "insolv": 3, "failur": 3, "deriv": 3, "counterparti": 3, "debt": 3, "liquid": [3, 4], "fair": [3, 5], "instrument": 3, "polit": 3, "disput": 3, "geopolit": 3, "tension": 3, "terror": 3, "disast": 3, "accid": 3, "interrupt": 3, "npolit": 3, "whole": 3, "outsourc": 3, "korea": 3, "vietnam": 3, "restrict": [3, 6], "tariff": 3, "export": 3, "portion": 3, "revenu": [3, 4, 6], "restructur": 3, "ceas": 3, "disrupt": [3, 4], "escal": [3, 4], "nmani": 3, "prone": 3, "earthquak": 3, "climat": 3, "weather": 3, "occur": [3, 5], "plant": 3, "terrorist": [3, 5], "attack": [3, 5], "hostil": 3, "ransomwar": 3, "cybersecur": 3, "labor": 3, "beyond": 3, "nsuch": 3, "imposs": 3, "slowdown": 3, "outag": 3, "neg": [3, 6], "pandem": 3, "covid": 3, "19": 3, "economi": 3, "imposit": 3, "stringent": 3, "travel": 3, "freight": 3, "movement": 3, "ramp": 3, "nfollow": 3, "expenditur": 3, "resum": 3, "exacerb": 3, "insur": 3, "insuffici": 3, "nglobal": 3, "unabl": 3, "assur": 3, "minor": 3, "naddition": 3, "intensifi": 3, "seamlessli": [3, 4], "nto": 3, "stimul": 3, "ndue": 3, "upgrad": 3, "quantiti": 3, "defect": 3, "defici": 3, "supersed": 3, "nsubstanti": 3, "much": 3, "transport": 3, "diminish": 3, "provis": 3, "reimburs": 3, "warranti": 3, "unanticip": 3, "liabil": 3, "final": [3, 4, 6], "finish": 3, "destin": 3, "made": [3, 4, 6], "prepay": 3, "termin": 3, "recover": 3, "exposur": 3, "nfutur": 3, "semiconductor": 3, "suffer": 3, "poor": 3, "constrain": [3, 4, 6], "shipment": 3, "unexpectedli": 3, "interfer": 3, "unsaf": [3, 5], "expos": 3, "fix": [3, 4], "widespread": [3, 5], "vulner": [3, 5], "compromis": [3, 5], "claim": 3, "modif": [3, 5], "intang": 3, "lost": [3, 4], "cancel": 3, "obsolet": 3, "exce": 3, "realiz": 3, "accru": 3, "excess": 3, "impair": 3, "whenev": 3, "circumst": 3, "amount": [3, 4, 5, 6], "carri": [3, 6], "incur": 3, "unpredict": [3, 6], "pace": [3, 5], "obsolesc": 3, "forecast": 3, "incorrectli": [3, 6], "extens": [3, 4, 6], "issuanc": 3, "unknowingli": 3, "notifi": 3, "preclud": 3, "bui": 3, "percept": 3, "android": 3, "playstat": 3, "nintendo": 3, "xbox": 3, "inclin": 3, "devot": 3, "compel": [3, 6], "dissatisfi": 3, "vast": [3, 5], "storefront": 3, "mechan": [3, 5, 6], "safari": 3, "union": 3, "eu": 3, "dma": 3, "reduct": 3, "narrow": 3, "scope": [3, 4], "elimin": 3, "nfailur": 3, "appeal": 3, "subscrib": 3, "nsome": 3, "manner": [3, 4, 6], "nurtur": 3, "nmuch": 3, "chief": 3, "silicon": 3, "vallei": 3, "constantli": 3, "driver": 3, "recruit": 3, "subsidi": 3, "staf": 3, "contractor": 3, "placement": 3, "increment": 3, "weaken": 3, "telecommun": 3, "war": 3, "virus": 3, "ins": 3, "incid": 3, "redund": 3, "ineffect": 3, "thing": [3, 6], "interf": 3, "imped": 3, "ship": 3, "nloss": 3, "unauthor": 3, "confidenti": 3, "encrypt": 3, "But": [3, 5, 6], "malici": [3, 5], "behalf": 3, "normal": [3, 5, 6], "investig": 3, "penalti": 3, "frequenc": [3, 4], "actor": [3, 5], "circumv": [3, 4, 5], "obfusc": 3, "forens": 3, "hinder": [3, 6], "recov": 3, "perpetr": 3, "profil": 3, "authent": 3, "hack": [3, 5], "malfeas": 3, "faulti": 3, "password": 3, "irregular": 3, "fraudul": 3, "induc": 3, "disclos": [3, 4, 6], "usernam": 3, "turn": 3, "multifactor": 3, "unusu": 3, "freez": 3, "suspici": 3, "nwhile": 3, "ninvest": 3, "contempl": 3, "endeavor": 3, "distract": 3, "tangibl": 3, "approv": 3, "oner": 3, "ventur": 3, "riski": 3, "leas": 3, "unfavor": 3, "arisen": 3, "ordinari": 3, "cours": [3, 5], "resolv": [3, 5], "sometim": [3, 6], "indemnif": 3, "indemnifi": 3, "alleg": 3, "magnitud": 3, "assert": 3, "royalti": 3, "vigor": 3, "defend": 3, "court": 3, "internation": 3, "plaintiff": 3, "injunct": 3, "relief": 3, "nregardless": 3, "merit": 3, "recognit": 3, "settl": 3, "uncertain": 3, "disgorg": 3, "remedi": 3, "worldwid": 3, "antitrust": 3, "bill": 3, "commerc": 3, "mobil": [3, 6], "televis": 3, "film": 3, "anticorrupt": 3, "cash": [3, 4], "repatri": 3, "anti": 3, "launder": 3, "tax": 3, "wast": 3, "recycl": 3, "ncomplianc": 3, "impos": [3, 6], "agent": 3, "nregulatori": 3, "ban": 3, "nexpect": 3, "increasingli": [3, 5, 6], "greenhous": 3, "ga": 3, "emiss": 3, "civil": 3, "disagre": 3, "perceiv": 3, "feder": 3, "scrutini": 3, "nfrom": 3, "engag": [3, 6], "noncompli": 3, "individu": [3, 4, 5], "lawsuit": 3, "monopol": 3, "nfurther": 3, "earn": 3, "googl": [3, 6], "search": 3, "nthere": 3, "retent": 3, "transfer": 3, "pass": [3, 6], "pend": 3, "inquiri": 3, "government": 3, "entiti": [3, 6], "biometr": 3, "notif": 3, "permit": [3, 6], "healthcar": 3, "liabl": 3, "investigatori": 3, "cardhold": 3, "compress": [3, 4], "acquir": 3, "extent": 3, "unexpect": [3, 6], "dollar": 3, "denomin": 3, "offset": 3, "strengthen": 3, "nconvers": 3, "therebi": [3, 4], "thu": 3, "hedg": 3, "deterior": 3, "sovereign": 3, "heighten": 3, "worsen": 3, "A": [3, 4, 5, 6], "collater": 3, "bank": 3, "unsecur": 3, "subassembli": 3, "assembl": 3, "legisl": 3, "ireland": [3, 5], "singapor": 3, "organis": 3, "statutori": 3, "valuat": 3, "defer": 3, "bodi": 3, "adequaci": 3, "ow": 3, "ngener": 3, "volum": [3, 4, 5], "repurchas": 3, "dividend": 3, "consumm": 3, "declar": 3, "board": 3, "unresolv": 3, "nnone": 3, "threat": [3, 5], "postur": 3, "25": 3, "2016": 3, "coordin": 3, "track": 3, "committe": 3, "oversight": 3, "counsel": 3, "chair": 3, "headquart": 3, "cupertino": [3, 6], "center": [3, 6], "formal": [3, 6], "conclud": 3, "uninstal": 3, "web": 3, "browser": 3, "june": 3, "contractu": 3, "desist": 3, "stai": 3, "grant": 3, "ndepart": 3, "justic": 3, "depart": 3, "doj": 3, "district": 3, "attornei": 3, "jersei": 3, "redress": 3, "anticompetit": 3, "nonmonetari": 3, "defens": 3, "nepic": 3, "epic": 3, "northern": 3, "unfair": [3, 5], "enjoin": 3, "extern": 3, "januari": 3, "motion": 3, "oppos": 3, "30": 3, "vacat": 3, "fourth": 3, "mine": 3, "nnot": 3, "aapl": 3, "nholder": 3, "na": 3, "301": 3, "npurchas": 3, "nshare": 3, "nperiod": 3, "ttotal": 3, "taverag": 3, "npaid": 3, "nannounc": 3, "napproxim": 3, "That": [3, 6], "Be": 3, "nunder": 3, "njune": 3, "august": 3, "nopen": 3, "negoti": 3, "t35": 3, "697": 3, "t224": 3, "naugust": 3, "31": 3, "t42": 3, "910": 3, "t221": 3, "39": 3, "nseptemb": 3, "t33": 3, "653": 3, "t222": 3, "86": 3, "ntotal": 3, "t112": 3, "260": 3, "t89": 3, "074": 3, "110": 3, "billion": 3, "previou": [3, 4, 6], "10b5": 3, "graph": 3, "cumul": 3, "reinvest": 3, "dow": 3, "supersector": 3, "27": 3, "2019": 3, "n2218": 3, "tseptemb": 3, "t100": 3, "t207": 3, "t273": 3, "t281": 3, "t322": 3, "t430": 3, "t113": 3, "t156": 3, "t131": 3, "t155": 3, "t210": 3, "ndow": 3, "t146": 3, "t216": 3, "t215": 3, "nfirst": 3, "nsecond": 3, "nthird": 3, "sequoia": 3, "nfourth": 3, "plu": 3, "nfiscal": 3, "six": 3, "realign": 3, "span": 3, "wherea": 3, "indirectli": 3, "n2024": 3, "tchang": 3, "t2023": 3, "t2022": 3, "namerica": 3, "t167": 3, "045": 3, "t3": 3, "t162": 3, "560": 3, "t169": 3, "658": 3, "neurop": 3, "t101": 3, "328": 3, "t7": 3, "294": 3, "t95": 3, "118": 3, "ngreater": 3, "t66": 3, "952": 3, "t72": 3, "559": 3, "t74": 3, "njapan": 3, "t25": 3, "052": 3, "t24": 3, "257": 3, "977": 3, "nrest": 3, "t30": 3, "t4": 3, "t29": 3, "615": 3, "t1": 3, "t391": 3, "035": 3, "t2": 3, "t383": 3, "285": 3, "t394": 3, "weak": [3, 5], "renminbi": 3, "yen": [3, 6], "t201": 3, "183": 3, "t200": 3, "583": 3, "t205": 3, "489": 3, "984": 3, "357": 3, "t40": 3, "177": 3, "t26": 3, "694": 3, "t28": 3, "300": [3, 4], "292": 3, "t37": 3, "005": 3, "t39": 3, "845": [3, 5], "t41": 3, "241": 3, "n96": 3, "169": 3, "t13": 3, "t85": 3, "t9": 3, "t78": 3, "129": 3, "amort": 3, "bundl": 3, "flat": 3, "ngross": 3, "t109": 3, "633": 3, "t108": 3, "803": 3, "t114": 3, "728": 3, "t71": 3, "t60": 3, "345": 3, "t56": 3, "054": 3, "t180": 3, "683": 3, "148": 3, "t170": 3, "782": 3, "t36": 3, "t73": 3, "t70": 3, "t46": 3, "t44": 3, "t43": 3, "noper": 3, "t31": 3, "370": 3, "t5": 3, "915": 3, "t14": 3, "251": 3, "npercentag": 3, "t8": 3, "nsell": 3, "administr": 3, "097": 3, "932": 3, "094": 3, "t6": 3, "t57": 3, "467": 3, "t54": 3, "847": 3, "t51": 3, "t15": 3, "headcount": 3, "nprovis": 3, "749": 3, "t16": 3, "741": 3, "t19": 3, "neffect": 3, "nstatutori": 3, "t21": 3, "aid": 3, "nliquid": 3, "unrestrict": 3, "140": 3, "ndebt": 3, "97": 3, "payabl": 3, "promissori": 3, "nleas": 3, "space": [3, 5], "nmanufactur": 3, "noncancel": 3, "ndeem": 3, "tcja": 3, "paid": 3, "nstate": 3, "fund": 3, "escrow": 3, "ncapit": 3, "95": 3, "nrecent": 3, "pronounc": 3, "nincom": 3, "fasb": 3, "asu": 3, "09": [3, 4], "740": 3, "reconcili": 3, "reconcil": [3, 6], "disaggreg": 3, "prospect": 3, "novemb": [3, 5], "07": [3, 4, 6], "280": 3, "maker": 3, "codm": 3, "alloc": [3, 5], "retrospect": 3, "ncritic": 3, "conform": [3, 6], "gaap": 3, "nuncertain": 3, "domest": 3, "taxat": 3, "resolut": 3, "conting": 3, "26": 3, "still": [3, 5], "ninterest": 3, "forth": 3, "hypothet": 3, "nsensit": 3, "nhypothet": 3, "nrate": 3, "npotenti": 3, "n100": 3, "tenor": 3, "ndeclin": 3, "755": 3, "089": 3, "nterm": 3, "nincreas": 3, "t139": 3, "t194": 3, "nforeign": 3, "express": [3, 6], "var": 3, "mont": 3, "carlo": 3, "interv": 3, "538": 3, "669": 3, "underli": [3, 6], "nindex": 3, "tpage": 3, "nconsolid": 3, "n29": 3, "n30": 3, "sheet": 3, "n31": 3, "n32": 3, "n33": 3, "nnote": 3, "n34": 3, "nreport": 3, "n48": 3, "nall": 3, "omit": [3, 6], "submiss": 3, "nyear": 3, "n2023": 3, "n2022": 3, "nnet": 3, "t294": 3, "866": 3, "t298": 3, "085": 3, "t316": 3, "199": 3, "t96": 3, "ncost": 3, "t185": 3, "233": 3, "t189": 3, "282": 3, "471": 3, "119": 3, "855": 3, "t22": 3, "075": 3, "352": 3, "t214": 3, "137": 3, "t223": 3, "546": 3, "t123": 3, "216": 3, "t119": 3, "437": 3, "t269": 3, "565": 3, "334": 3, "485": 3, "736": 3, "103": 3, "t93": 3, "995": 3, "t99": 3, "nearn": 3, "nbasic": 3, "ndilut": 3, "08": [3, 6], "343": 3, "783": 3, "744": 3, "215": 3, "963": 3, "095": 3, "812": 3, "547": 3, "325": 3, "819": 3, "nsee": 3, "translat": 3, "t395": 3, "765": 3, "511": 3, "unreal": 3, "832": 3, "t323": 3, "212": 3, "nadjust": 3, "337": 3, "717": 3, "394": 3, "138": 3, "850": 3, "563": 3, "104": 3, "t204": 3, "t253": 3, "816": 3, "899": 3, "272": 3, "t98": 3, "016": 3, "652": 3, "t88": 3, "531": 3, "nasset": 3, "ncurrent": 3, "ncash": 3, "943": 3, "965": 3, "228": 3, "590": 3, "naccount": 3, "410": 3, "508": 3, "nvendor": 3, "t32": 3, "833": 3, "477": 3, "ninventori": 3, "286": 3, "331": 3, "287": 3, "695": 3, "t152": 3, "987": 3, "t143": 3, "566": 3, "t91": 3, "479": 3, "544": 3, "t45": 3, "680": 3, "715": 3, "834": 3, "t64": 3, "758": 3, "t211": 3, "993": 3, "t209": 3, "017": 3, "t364": 3, "980": 3, "t352": 3, "nliabil": 3, "t68": 3, "960": 3, "t62": 3, "611": 3, "304": 3, "t58": 3, "829": 3, "ndefer": 3, "249": 3, "061": 3, "ncommerci": 3, "967": 3, "985": 3, "t10": 3, "912": 3, "822": 3, "t176": 3, "392": 3, "t145": 3, "308": 3, "750": 3, "888": 3, "t49": 3, "848": 3, "638": 3, "t308": 3, "030": 3, "t290": 3, "ncommit": 3, "nsharehold": 3, "400": 3, "116": 3, "786": 3, "550": 3, "n83": 3, "276": 3, "naccumul": 3, "deficit": 3, "154": 3, "214": 3, "172": 3, "452": 3, "950": 3, "146": 3, "t50": 3, "672": 3, "t63": 3, "090": 3, "nbegin": 3, "849": 3, "365": 3, "423": 3, "346": 3, "175": 3, "withheld": 3, "settlement": 3, "521": 3, "971": 3, "t12": 3, "034": 3, "t11": 3, "nend": 3, "t83": 3, "nretain": 3, "068": 3, "562": 3, "ndividend": 3, "218": 3, "793": 3, "612": 3, "099": 3, "454": 3, "846": 3, "77": 3, "046": 3, "186": 3, "109": 3, "t163": 3, "rsu": 3, "t0": 3, "98": 3, "94": 3, "32": 3, "737": 3, "929": 3, "ndepreci": 3, "445": 3, "519": 3, "688": 3, "038": 3, "266": 3, "227": 3, "006": 3, "788": 3, "356": 3, "271": 3, "520": 3, "618": 3, "484": 3, "731": 3, "684": 3, "499": 3, "020": 3, "889": 3, "448": 3, "552": 3, "031": 3, "t118": 3, "254": 3, "t110": 3, "543": 3, "t122": 3, "151": 3, "48": 3, "656": 3, "513": 3, "76": 3, "923": 3, "nproce": 3, "211": 3, "686": 3, "917": 3, "135": 3, "828": 3, "446": 3, "447": 3, "959": 3, "708": 3, "086": 3, "935": 3, "705": 3, "354": 3, "nfinanc": 3, "441": 3, "431": 3, "223": 3, "234": [3, 5], "025": 3, "841": 3, "nrepurchas": 3, "949": 3, "89": 3, "402": 3, "465": 3, "nrepay": 3, "958": 3, "repay": 3, "978": 3, "955": 3, "361": 3, "581": 3, "160": 3, "121": 3, "983": 3, "488": 3, "794": 3, "760": 3, "nsupplement": 3, "102": 3, "t18": 3, "679": 3, "573": 3, "33": 3, "nbasi": 3, "prior": 3, "reclassifi": 3, "nrevenu": 3, "remit": 3, "straight": 3, "vest": 3, "treat": 3, "sold": 3, "nderiv": 3, "nonleas": 3, "34": 3, "entitl": 3, "commenc": 3, "deliveri": 3, "stand": 3, "ssp": 3, "icloud": 3, "siri": 3, "discount": 3, "undeliv": 3, "unbil": 3, "n26": 3, "n37": 3, "proport": 3, "moder": 3, "64": 3, "dilut": 3, "nnumer": 3, "ndenomin": 3, "nweight": 3, "312": 3, "316": 3, "856": 3, "antidilut": 3, "tunreal": 3, "ngain": 3, "tfair": 3, "nvalu": 3, "tcash": 3, "nequival": 3, "tcurrent": 3, "tnon": 3, "t27": 3, "nlevel": 3, "nmonei": 3, "t778": 3, "nmutual": 3, "n515": 3, "t105": 3, "t617": 3, "nsubtot": 3, "293": 3, "395": 3, "nu": 3, "treasuri": 3, "516": 3, "t212": 3, "087": 3, "380": 3, "agenc": 3, "159": 3, "t703": 3, "t17": 3, "568": 3, "158": 3, "810": 3, "ncertif": 3, "deposit": 3, "t873": 3, "t387": 3, "t478": 3, "066": 3, "ncorpor": 3, "t65": 3, "622": 3, "t270": 3, "953": 3, "939": 3, "027": 3, "t47": 3, "886": 3, "nmunicip": 3, "t412": 3, "t405": 3, "t190": 3, "nmortgag": 3, "595": 3, "t175": 3, "403": 3, "t23": 3, "367": 3, "278": 3, "t132": 3, "t583": 3, "635": 3, "t128": 3, "056": 3, "966": 3, "t34": 3, "t160": 3, "t688": 3, "650": 3, "36": 3, "359": 3, "t481": 3, "n442": 3, "t428": 3, "t923": 3, "t909": 3, "406": 3, "114": 3, "468": 3, "136": 3, "t271": 3, "533": 3, "048": 3, "491": 3, "332": 3, "t320": 3, "t608": 3, "t76": 3, "840": 3, "956": 3, "890": 3, "t20": 3, "627": 3, "243": 3, "t628": 3, "t602": 3, "t192": 3, "t410": 3, "735": 3, "636": 3, "t344": 3, "t144": 3, "470": 3, "657": 3, "831": 3, "125": 3, "162": 3, "t173": 3, "752": 3, "corrobor": 3, "mortgag": 3, "classifi": 3, "37": 3, "cross": 3, "swap": 3, "remeasur": 3, "notion": 3, "069": 3, "730": 3, "575": 3, "493": 3, "t104": 3, "777": 3, "nhedg": 3, "433": 3, "505": 3, "247": 3, "ntrade": 3, "41": 3, "44": 3, "depreci": 3, "nland": 3, "690": 3, "nmachineri": 3, "t80": 3, "205": 3, "314": 3, "nleasehold": 3, "839": 3, "599": 3, "73": 3, "70": 3, "884": 3, "852": 3, "t55": 3, "906": 3, "601": 3, "703": 3, "010": 3, "457": 3, "634": 3, "391": 3, "neuropean": 3, "opinion": [3, 5], "1991": 3, "2007": 3, "irish": 3, "branch": 3, "2003": 3, "2014": 3, "2015": 3, "minist": 3, "juli": 3, "annul": 3, "ecj": 3, "hear": 3, "asid": 3, "confirm": 3, "unrecogn": 3, "nfeder": 3, "571": 3, "080": 3, "644": 3, "265": 3, "801": 3, "726": 3, "570": 3, "298": 3, "49": 3, "t84": 3, "428": 3, "603": 3, "483": 3, "t347": 3, "t669": 3, "076": 3, "830": 3, "419": 3, "072": 3, "pretax": 3, "72": 3, "71": 3, "ncomput": 3, "885": 3, "012": 3, "124": 3, "518": 3, "nimpact": 3, "246": 3, "311": 3, "366": 3, "397": 3, "nexcess": 3, "893": 3, "871": 3, "192": 3, "739": 3, "ntax": 3, "carryforward": 3, "302": 3, "naccru": 3, "413": 3, "421": 3, "nunreal": 3, "173": 3, "168": 3, "873": 3, "743": 3, "nless": 3, "374": 3, "007": 3, "369": 3, "551": 3, "998": 3, "nright": 3, "179": 3, "nminimum": 3, "674": 3, "940": 3, "t511": 3, "t455": 3, "t490": 3, "805": 3, "202": 3, "indefinit": 3, "temporari": 3, "727": 3, "044": 3, "284": 3, "ndecreas": 3, "386": 3, "463": 3, "982": 3, "542": 3, "936": 3, "070": 3, "expir": 3, "statut": 3, "229": 3, "494": 3, "closur": 3, "intercompani": 3, "exceed": 3, "multiyear": 3, "exercis": 3, "noncash": 3, "rou": 3, "tfinanci": 3, "t2024": 3, "tother": 3, "661": 3, "tproperti": 3, "015": 3, "303": 3, "676": 3, "t165": 3, "t752": 3, "t859": 3, "430": 3, "842": [3, 5], "tfinanc": 3, "n2025": 3, "820": 3, "t171": 3, "991": 3, "n2026": 3, "914": 3, "n2027": 3, "t59": 3, "733": 3, "n2028": 3, "360": 3, "t38": 3, "398": 3, "n2029": 3, "187": 3, "nthereaft": 3, "t837": 3, "undiscount": 3, "790": 3, "imput": 3, "376": 3, "534": 3, "t896": 3, "borrow": 3, "proce": 3, "nine": 3, "nmatur": 3, "333": 3, "264": 3, "948": 3, "645": 3, "309": 3, "arrear": 3, "namount": 3, "n2013": 3, "nfix": 3, "2062": 3, "t97": 3, "341": 3, "03": 3, "65": 3, "t106": 3, "572": 3, "n97": 3, "nunamort": 3, "premium": 3, "321": 3, "358": 3, "113": 3, "662": 3, "930": 3, "342": 3, "800": 3, "180": 3, "88": 3, "ndure": 3, "425": 3, "426": 3, "372": 3, "589": 3, "055": 3, "appreci": 3, "four": 3, "holder": 3, "n2014": 3, "bonu": 3, "nrestrict": 3, "nnumber": 3, "nrsu": 3, "ngrant": 3, "naggreg": 3, "nfair": 3, "nbalanc": 3, "t240": 3, "427": 3, "t75": 3, "t150": 3, "861": 3, "501": 3, "768": 3, "87": 3, "101": 3, "878": 3, "144": 3, "t127": 3, "t135": 3, "91": 3, "456": 3, "78": 3, "59": 3, "t140": 3, "326": 3, "t158": 3, "204": 3, "350": 3, "002": [3, 4], "nuncondit": 3, "uncondit": 3, "206": 3, "440": 3, "156": 3, "t633": 3, "t670": 3, "226": 3, "45": 3, "nconting": 3, "accrual": 3, "nconcentr": 3, "attribut": [3, 6], "46": 3, "t67": 3, "098": 3, "082": 3, "062": 3, "569": 3, "895": 3, "458": 3, "207": 3, "nonrecur": 3, "t142": 3, "196": 3, "t138": 3, "t147": 3, "859": 3, "nchina": 3, "n66": 3, "t181": 3, "887": 3, "t172": 3, "269": 3, "nlong": 3, "664": 3, "797": 3, "778": 3, "219": 3, "47": 3, "nopinion": 3, "nwe": 3, "fairli": 3, "pcaob": 3, "sponsor": 3, "treadwai": 3, "2013": 3, "unqualifi": 3, "thereon": 3, "nthese": 3, "misstat": 3, "fraud": 3, "ndescript": 3, "naudit": 3, "nhow": 3, "nmatter": 3, "qualifi": 3, "letter": 3, "advisor": 3, "ernst": 3, "llp": 3, "auditor": 3, "2009": 3, "nsan": 3, "jose": 3, "nnovemb": 3, "coso": 3, "nour": 3, "ndefinit": 3, "mainten": 3, "disposit": 3, "receipt": 3, "nevalu": 3, "nbase": 3, "13a": 3, "15d": 3, "ninher": 3, "met": 3, "appear": [3, 6], "paragraph": 3, "51": [3, 6], "ninsid": 3, "deirdr": 3, "brien": 3, "vice": 3, "presid": 3, "affirm": 3, "april": 3, "withhold": 3, "remitt": 3, "mr": 3, "copi": [3, 4], "solicit": 3, "id": 3, "00042": 3, "nincorpor": 3, "texhibit": 3, "descript": [3, 6], "tform": 3, "tfile": 3, "nrestat": 3, "namend": 3, "bylaw": 3, "nindentur": 3, "york": [3, 6], "mellon": 3, "truste": 3, "noffic": 3, "certif": 3, "2018": 3, "85": 3, "2043": 3, "05": 3, "2044": 3, "februari": 3, "55": 3, "2045": 3, "900": 3, "700": 3, "60": 3, "250": 3, "2036": 3, "2046": 3, "450": 3, "2047": 3, "2049": 3, "2030": 3, "2050": 3, "2060": 3, "2028": 3, "2041": 3, "2051": 3, "2061": 3, "2032": 3, "2052": 3, "54": 3, "2033": 3, "2053": 3, "ceo": 3, "n12": 3, "nsubsidiari": 3, "n23": 3, "nconsent": 3, "n24": 3, "npower": 3, "signatur": 3, "nrule": 3, "nsection": 3, "1350": 3, "n101": 3, "ninlin": 3, "xbrl": 3, "n104": 3, "inlin": 3, "compensatori": 3, "herewith": 3, "furnish": 3, "herebi": 3, "undertak": 3, "56": 3, "nsignatur": 3, "npursuant": 3, "duli": 3, "undersign": 3, "thereunto": 3, "ndate": 3, "nby": 3, "luca": [3, 6], "maestri": 3, "nluca": 3, "nsenior": 3, "nchief": 3, "nknow": 3, "THESE": 3, "appoint": 3, "cook": 3, "jointli": 3, "hi": [3, 6], "her": 3, "substitut": 3, "him": 3, "thereto": 3, "therewith": 3, "ratifi": 3, "done": [3, 5, 6], "virtu": 3, "hereof": 3, "nname": 3, "ttitl": 3, "tdate": 3, "tchief": 3, "tnovemb": 3, "ntimothi": 3, "tsenior": 3, "kondo": 3, "nchri": 3, "wanda": 3, "austin": 3, "nwanda": 3, "gorski": 3, "tdirector": 3, "nalex": 3, "andrea": [3, 5], "jung": 3, "nandrea": 3, "arthur": 3, "levinson": 3, "narthur": 3, "monica": 3, "lozano": 3, "nmonica": 3, "ronald": 3, "sugar": 3, "nronald": 3, "susan": 3, "wagner": 3, "nsusan": 3, "57": 3, "turbo": [3, 4, 6], "invdestacksmeticsisdict": 3, "setispect": 3, "20cyan": 3, "evaluationseld": 3, "anvis": 3, "droitent": 3, "discernminerv": 3, "versbobprefvers": 3, "vo\u8be5": 3, "option\u548c": 3, "meio": 3, "\u0432\u0440\u0435\u043ccisco": 3, "dellaischenpoihscap": 3, "geme": 3, "gettim": 3, "unscal": 3, "vocabulari": [3, 6], "closer": 3, "sharpen": 3, "uniform": 3, "raschka": 3, "repetit": [3, 4, 6], "radic": 3, "grappl": 3, "safer": 3, "fascin": 3, "spontan": 3, "aren": 3, "linear": 3, "absent": 3, "coax": 3, "journei": 3, "suddenli": 3, "manifest": 3, "deliber": [3, 5], "contend": 3, "70b": 3, "rethink": 3, "tutor": 3, "children": 3, "verifi": [3, 6], "predefin": [3, 6], "weren": 3, "kind": 3, "usual": 3, "resist": 3, "quantif": 3, "contamin": [3, 5], "massiv": [3, 5], "truli": 3, "unseen": 3, "longitudin": 3, "mostli": [3, 6], "versu": 3, "latter": 3, "tailor": 3, "great": [3, 6], "cognit": 3, "misinform": [3, 5], "citat": 3, "tempor": 3, "scientif": 3, "disclaim": 3, "referr": 3, "incorrect": [3, 5], "demograph": 3, "stereotyp": [3, 5], "societ": [3, 5], "pii": 3, "anonym": 3, "leakag": [3, 5], "carryov": 3, "multi": [3, 6], "mathemat": [3, 5], "fallaci": 3, "causal": 3, "think": 3, "idiom": 3, "sarcasm": 3, "terminologi": 3, "lingual": 3, "misunderstand": 3, "syntax": 3, "scan": 3, "compat": [3, 6], "scalabl": [3, 4], "overconfid": 3, "clariti": [3, 4, 6], "audienc": 3, "densiti": 3, "satisfact": [3, 6], "misus": [3, 5], "moral": 3, "co2": 3, "energi": 3, "consumpt": 3, "server": [3, 6], "imag": 3, "audio": 3, "etc": [3, 6], "truth": [3, 6], "layer": [3, 4, 6], "palm": 3, "easi": [3, 4], "synthet": [3, 6], "timeout": 3, "variat": 3, "inter": 3, "rater": 3, "ti": 3, "tier": 3, "holist": 3, "fast": [3, 5, 6], "experiment": [3, 6], "vi": 3, "categor": [3, 6], "intrins": 3, "extrins": 3, "sequenc": [3, 6], "perplex": 3, "downstream": [3, 6], "synthesi": 3, "discret": 3, "prefix": [3, 5], "roug": 3, "bleu": 3, "bilingu": 3, "understudi": 3, "overlap": [3, 4], "favor": [3, 6], "breviti": 3, "insensit": 3, "semant": [3, 4], "orient": 3, "gist": 3, "meteor": 3, "synonym": 3, "stem": [3, 6], "paraphras": 3, "alongsid": [3, 5], "computation": [3, 4], "cider": 3, "consensu": 3, "tf": 3, "idf": 3, "caption": 3, "reliant": 3, "corpu": 3, "ter": 3, "edit": [3, 5], "hypothesi": 3, "penal": 3, "bertscor": 3, "embed": [3, 4], "bert": 3, "spice": 3, "proposit": 3, "scene": 3, "pure": 3, "analyst": [3, 4], "rouge_1": 3, "rouge_2": 3, "ideal": [3, 6], "cheaper": 3, "evaluate_summari": 3, "unigram": 3, "bigram": 3, "absl": 3, "py": 3, "rouge_scor": 3, "generated_summari": 3, "reference_summari": 3, "google_bleu": 3, "bleu_scor": 3, "rouge1": 3, "rouge2": 3, "arbitrari": 3, "chosen": 3, "sentence1": 3, "cat": 3, "sat": 3, "mat": 3, "sentence2": 3, "ate": 3, "3333333333333333": 3, "7272727272727272": 3, "4444444444444445": 3, "generate_summari": 3, "summir": 3, "liner": 3, "excerpt": 3, "evaluate_summary_model": 3, "model_benchmark": 3, "models_test": 3, "benchmark_summari": 3, "model_summari": 3, "evaluation_result": 3, "analyz": [3, 4, 5, 6], "statu": 3, "concis": 3, "element": [3, 6], "verbos": 3, "peripher": 3, "quit": [3, 6], "miss": 3, "convei": [3, 4], "breadth": 3, "Of": [3, 5], "vibe": 3, "visualize_prompt_comparison": 3, "matplotlib": 3, "radar": 3, "radar_plot": 3, "tmp": 3, "ipykernel_1652501": 3, "940173201": 3, "userwarn": 3, "figurecanvasagg": 3, "largest": 3, "deviat": [3, 6], "granular": [3, 4], "likert": 3, "pairwis": 3, "ensembl": 3, "repeatedli": 3, "fluenci": 3, "refin": 3, "narr": 3, "notabl": [3, 6], "henc": 3, "integ": 3, "rubric": 3, "hollist": 3, "judgeevalu": 3, "grammar": [3, 6], "evaluate_with_llm": 3, "criterion": 3, "judge_model": 3, "candidate_summari": 3, "grammat": 3, "y": [3, 6], "z": 3, "w": [3, 4], "benchmark_model": 3, "test_model": 3, "input_text": [3, 4], "trillion": [3, 6], "evals_list": 3, "1775618912": 3, "variant": 3, "slightli": 3, "drift": 3, "lowest": 3, "degrad": [3, 6], "firstli": 3, "overhead": 3, "prefer": [3, 6], "egocentr": 3, "tight": 3, "aproach": 3, "workflow": [3, 6], "aplic": 3, "clearli": [3, 6], "earlier": 3, "depict": [3, 6], "correl": 3, "multilingu": 3, "golden": 3, "languang": 3, "arena": 3, "blind": 3, "randomli": 3, "loop": 3, "customiz": 3, "irrelev": 3, "unhelp": 3, "occasion": 3, "rare": 3, "perfectli": 3, "cater": 3, "critiqu": 3, "elo": 3, "thought": [3, 6], "exam": 3, "probe": 3, "certifi": 3, "began": 3, "glue": 3, "entail": 3, "baselin": 3, "superglu": 3, "deeper": [3, 4], "successor": 3, "grew": 3, "big": 3, "bench": 3, "srivastava": 3, "arithmet": 3, "truthfulqa": 3, "multitask": 3, "hendryck": 3, "multidisciplinari": 3, "stanford": 3, "helm": 3, "multidimension": 3, "surround": [3, 6], "humanev": 3, "lmsy": 3, "brought": 3, "dialogu": 3, "chiang": 3, "alpacaev": 3, "duboi": 3, "mt": 3, "Their": [3, 5, 6], "render": 3, "crowdsourc": 3, "livebench": 3, "white": 3, "resili": 3, "meaningfulli": 3, "zebralog": 3, "grid": 3, "puzzl": 3, "brailsford": 3, "1999": 3, "lsat": 3, "hous": 3, "clue": 3, "strateg": [3, 5, 6], "deduct": 3, "arriv": 3, "programmat": [3, 6], "2x2": 3, "6x6": 3, "reductio": 3, "ad": [3, 6], "absurdum": 3, "sonnet": [3, 4], "hard": 3, "10b": 3, "counterfactu": 3, "came": 3, "arc": 3, "prize": 3, "chollet": 3, "mike": [3, 5], "knoop": 3, "founder": 3, "zapier": 3, "fran\u00e7oi": 3, "creator": 3, "agi": 3, "kera": 3, "genuin": 3, "possess": 3, "elementari": 3, "novelti": 3, "someth": 3, "wouldn": 3, "interpol": 3, "synthes": 3, "fly": 3, "brute": 3, "pixel": 3, "perfect": 3, "unbeaten": 3, "win": 3, "poorli": 3, "recombin": 3, "spur": [3, 5], "takeawai": 3, "fourrier": 3, "bespok": 3, "sdk": 3, "autoregress": 3, "sub": 3, "liter": 3, "disturb": 3, "zero": [3, 6], "varianc": 3, "yt": 3, "ut": 3, "suppos": [3, 6], "ol": 3, "heteroscedast": 3, "regress": 3, "wish": 3, "lag": [3, 5], "bivari": 3, "evaluation_track": 3, "evaluationtrack": 3, "model_config": 3, "basemodelconfig": 3, "parallelismmanag": 3, "pipelineparamet": 3, "envconfig": 3, "is_accelerate_avail": 3, "datetim": 3, "timedelta": 3, "initprocessgroupkwarg": 3, "create_evaluation_pipelin": 3, "cache_dir": 3, "pretrain": 3, "float16": 3, "max_sampl": 3, "kwargs_handl": 3, "3000": 3, "save_detail": 3, "pipeline_param": 3, "launcher_typ": 3, "env_config": 3, "override_batch_s": 3, "use_chat_templ": 3, "trust_remote_cod": 3, "pipeline_paramet": 3, "schemat": [3, 4], "vllm": [3, 6], "tgi": 3, "storag": 3, "num_few_shot": 3, "vertic": 3, "bar": 3, "bigbench": 3, "winogrand": 3, "hellaswag": 3, "nlp": 3, "save_and_push_result": 3, "show_result": 3, "model_arg": 3, "send": [3, 6], "serverless": 3, "inference_server_address": 3, "inference_server_auth": 3, "model_id": 3, "null": 3, "bash": 3, "command": 3, "model_config_path": 3, "endpoint_model": 3, "llama3": [3, 4], "qwen2": [3, 6], "smollm2": 3, "3b": 3, "alibaba": [3, 6], "5b": [3, 6], "hui": 3, "allal": 3, "cluster": 3, "noteworthi": 3, "grain": [3, 6], "salt": [3, 6], "exponenti": 3, "modular": 3, "offici": 3, "revisit": 3, "trace": 3, "langchain_tracing_v2": 3, "langchain_api_kei": 3, "hf_evalu": 3, "langsmith_evalu": 3, "ls_client": 3, "dataset_nam": 3, "create_dataset": 3, "create_exampl": 3, "dataset_id": 3, "calculate_scor": 3, "reference_output": 3, "oai_client": 3, "xp_model_nam": 3, "lastli": 3, "run_evalu": 3, "And": 3, "upload_result": 3, "experiment_prefix": 3, "num_repetit": 3, "386a3620": 3, "9e1cc3cb": 3, "9d6a": 3, "4356": 3, "ab34": 3, "138e0abe8be4": 3, "8741976e": 3, "5268": 3, "4b75": 3, "949f": 3, "99477dde5d64": 3, "selectedsess": 3, "b831dc1e": 3, "90bc": 3, "4ed8": 3, "8080": 3, "fb42444724d6": 3, "4it": 3, "latest": [3, 4, 6], "tobia": 3, "evaluate_modul": 3, "6fc70b7be0088120a372dfdd5d320b39b8bb3630cb8029b193941d9376e86bb0": 3, "tue": 3, "nov": 3, "couldn": 3, "5it": 3, "5053784e": 3, "64445871": 3, "a53c": 3, "44b1": 3, "a422": 3, "4f49b2f9656f": 3, "69": 3, "4b29f3c9": 3, "9ef7e39a": 3, "2add": 3, "410c": 3, "89f8": 3, "9f1a8b198cf1": 3, "61": 3, "insert": 3, "combined_df": 3, "concat": 3, "ignore_index": 3, "execution_tim": 3, "example_id": 3, "333333": 3, "224388": 3, "feb10f92": 3, "3167": 3, "41f3": 3, "bb1c": 3, "d271153a31a8": 3, "5b196b22": 3, "9f4c": 3, "489c": 3, "b020": 3, "7823208b42d6": 3, "348101": 3, "722464": 3, "c310f159": 3, "064a": 3, "4035": 3, "97c3": 3, "a25bbf43abc2": 3, "386076": 3, "704104": 3, "f7f24899": 3, "dd50": 3, "409e": 3, "93cc": 3, "6fb1622b60bf": 3, "443038": 3, "725059": 3, "242856d6": 3, "efb5": 3, "4101": 3, "b1cf": 3, "5805532838ac": 3, "373418": 3, "795302": 3, "ce975169": 3, "a0ab": 3, "40ce": 3, "8e32": 3, "efa28d06079d": 3, "stat": 3, "groupbi": 3, "agg": 3, "sort": 3, "sort_valu": 3, "figur": [3, 6], "subplot": 3, "pyplot": 3, "plt": 3, "numpi": 3, "np": 3, "ax1": 3, "ax2": 3, "figsiz": 3, "2ecc71": 3, "3498db": 3, "e74c3c": 3, "bleu_mean": 3, "bleu_std": 3, "enumer": [3, 4], "errorbar": 3, "yerr": 3, "fmt": 3, "markers": 3, "capsiz": 3, "set_ylabel": 3, "set_titl": 3, "set_xtick": 3, "set_xticklabel": 3, "rotat": 3, "set_ylim": 3, "bottom": 3, "legend": 3, "exec_mean": 3, "exec_std": 3, "tight_layout": 3, "ndetail": 3, "4038": 3, "0453": 3, "7815": 3, "0433": 3, "3768": 3, "0424": 3, "8343": 3, "2208": 3, "3519": 3, "0775": 3, "9122": 3, "1482": 3, "377": 3, "042": 3, "078": 3, "slower": 3, "04": [3, 4], "latenc": [3, 4], "speed": 3, "interestingli": 3, "decoupl": 3, "reload": 3, "facilit": 3, "promptfooconfig": 3, "model_comparison": 3, "pretti": 3, "dump": 3, "default_flow_styl": 3, "sort_kei": 3, "prompt1": 3, "defaulttest": 3, "1000m": 3, "millisecond": 3, "eval_data": 3, "latency_m": 3, "totallatencym": 3, "token_usag": 3, "tokenusag": 3, "assert_pass": 3, "assertpasscount": 3, "assert_fail": 3, "assertfailcount": 3, "prompt_token": 3, "num_request": 3, "numrequest": 3, "2463": 3, "000035": 3, "3773": 3, "004620": 3, "1669": 3, "000091": 3, "1669m": 3, "highest": 3, "3773m": 3, "00462": 3, "promptfool": 3, "manual": 3, "redefin": 3, "prompt_comparison": 3, "prompt2": 3, "prompt3": 3, "prompt_fil": 3, "prompt_cont": 3, "BE": 3, "again": 3, "prompt_id": 3, "promptid": 3, "gradingresult": 3, "df_raw": 3, "reset_index": 3, "eas": [3, 5], "seamless": 3, "hf": 3, "plain": 3, "vanilla": 3, "defi": 3, "accustom": 3, "legaci": 3, "unsustain": 3, "prd": 3, "cultiv": 3, "organiz": 3, "stagnat": 3, "alb": 3, "loubna": 3, "anton": 3, "lozhkov": 3, "bakouch": 3, "gabriel": [3, 5], "mart\u00edn": 3, "bl\u00e1zquez": 3, "lewi": 3, "tunstal": 3, "agust\u00edn": 3, "piquer": 3, "andr": 3, "marafioti": 3, "cyril": 3, "zakka": 3, "leandro": 3, "von": 3, "werra": 3, "wolf": 3, "are24": 3, "judgearena": 3, "bps99": 3, "salli": 3, "pott": 3, "barbara": 3, "557": 3, "sciencedirect": 3, "s0377221798003646": 3, "doi": [3, 5, 6], "1016": 3, "s0377": 3, "2217": 3, "00364": 3, "ctj": 3, "jerri": [3, 5], "tworek": [3, 5], "heewoo": [3, 5], "jun": [3, 5], "qime": [3, 5], "henriqu": [3, 5], "pond": [3, 5], "de": [3, 5], "oliveira": [3, 5], "pinto": [3, 5], "harri": [3, 5], "yuri": 3, "burda": 3, "greg": [3, 5], "brockman": [3, 5], "raul": [3, 5], "puri": [3, 5], "gretchen": [3, 5], "krueger": [3, 5], "petrov": [3, 5], "heidi": 3, "khlaaf": 3, "girish": [3, 5], "sastri": [3, 5], "brook": [3, 5], "chan": [3, 5], "grai": [3, 5], "ryder": [3, 5], "mikhail": [3, 5], "pavlov": [3, 5], "alethea": [3, 5], "lukasz": 3, "kaiser": [3, 5], "mohammad": [3, 5], "bavarian": [3, 5], "clemen": [3, 5], "winter": [3, 5], "philipp": 3, "tillet": [3, 5], "felip": [3, 5], "petroski": [3, 5], "dave": [3, 5], "cum": [3, 5], "matthia": 3, "plappert": 3, "fotio": 3, "chantzi": [3, 5], "barn": 3, "ariel": 3, "herbert": 3, "voss": [3, 5], "hebgen": 3, "guss": 3, "nichol": 3, "paino": [3, 5], "nikola": [3, 5], "tezak": [3, 5], "jie": [3, 5], "babuschkin": [3, 5], "suchir": [3, 5], "balaji": [3, 5], "shantanu": [3, 5], "jain": [3, 5], "saunder": 3, "hess": [3, 5], "carr": 3, "josh": [3, 5], "achiam": [3, 5], "vedant": 3, "misra": 3, "evan": [3, 5], "morikawa": [3, 5], "matthew": 3, "knight": [3, 5], "mile": [3, 5], "brundag": [3, 5], "mira": [3, 5], "murati": [3, 5], "kati": [3, 5], "mayer": [3, 5], "bob": [3, 5, 6], "mcgrew": [3, 5], "ilya": [3, 5], "sutskev": [3, 5], "wojciech": [3, 5], "zaremba": [3, 5], "2107": 3, "03374": 3, "cz": 3, "lianmin": 3, "ying": 3, "sheng": 3, "anastasio": 3, "angelopoulo": 3, "tianl": 3, "dacheng": 3, "banghua": 3, "jordan": [3, 5], "gonzalez": 3, "ion": 3, "stoica": 3, "04132": 3, "cho24a": 3, "francoi": 3, "arcpriz": 3, "cho24b": 3, "dglh24": 3, "yann": 3, "bal\u00e1z": 3, "galambosi": 3, "tatsunori": 3, "hashimoto": 3, "debia": 3, "04475": 3, "fac24a": 3, "wiki": [3, 6], "fac24b": 3, "fac24c": 3, "model_doc": 3, "fac24d": 3, "cookbook": 3, "llm_judg": 3, "fac24f": 3, "fhwt23": 3, "cl\u00e9mentin": 3, "nathan": 3, "habib": 3, "hbb": 3, "collin": 3, "burn": 3, "steven": [3, 5], "basart": 3, "zou": 3, "manta": 3, "mazeika": 3, "song": [3, 5], "steinhardt": 3, "03300": 3, "hbd": 3, "du": 3, "maxwel": 3, "forb": 3, "yejin": 3, "choi": 3, "curiou": 3, "neural": [3, 6], "degener": 3, "1904": 3, "09751": 3, "hyc": 3, "binyuan": 3, "zeyu": 3, "cui": 3, "jiaxi": 3, "dayiheng": 3, "lei": [3, 5], "tianyu": 3, "jiajun": 3, "bowen": [3, 5], "kai": [3, 5], "dang": 3, "coder": 3, "preprint": [3, 6], "2409": 3, "12186": 3, "lx": 3, "zhen": 3, "xiaohan": 3, "jia": 3, "yuxuan": 3, "lai": 3, "chongyang": 3, "shuai": 3, "ma": [3, 5], "nlg": 3, "07103": 3, "lbl": 3, "bommasani": 3, "toni": 3, "dimitri": 3, "tsipra": 3, "dilara": 3, "soylu": 3, "michihiro": 3, "yasunaga": 3, "yian": 3, "deepak": 3, "narayanan": 3, "yuhuai": 3, "benjamin": [3, 5], "newman": 3, "binhang": 3, "bobbi": 3, "ce": 3, "christian": [3, 5], "cosgrov": 3, "r\u00e9": 3, "acosta": 3, "nava": [3, 5], "drew": 3, "hudson": 3, "zelikman": 3, "esin": 3, "durmu": 3, "faisal": 3, "ladhak": 3, "frieda": 3, "rong": 3, "hongyu": 3, "ren": 3, "huaxiu": 3, "yao": [3, 5], "jue": 3, "keshav": 3, "santhanam": 3, "laurel": 3, "lucia": 3, "mert": 3, "yuksekgonul": 3, "mirac": 3, "suzgun": 3, "guha": 3, "niladri": 3, "chatterji": 3, "omar": 3, "khattab": 3, "henderson": 3, "qian": [3, 5], "chi": [3, 6], "sang": 3, "shibani": [3, 5], "santurkar": [3, 5], "surya": 3, "icard": 3, "tianyi": 3, "vishrav": 3, "chaudhari": 3, "xuechen": 3, "yuhui": 3, "yuta": 3, "koreeda": 3, "2211": 3, "09110": 3, "lbc24": 3, "ronan": 3, "bra": 3, "allenai": 3, "lhe22": 3, "stephani": [3, 5], "owain": 3, "mimic": 3, "falsehood": 3, "2109": 3, "07958": 3, "pro24": 3, "dev": 3, "ras24": 3, "sebastian": 3, "scratch": 3, "1633437166": 3, "srr": 3, "aarohi": 3, "abhinav": 3, "rastogi": 3, "abhishek": 3, "rao": 3, "abu": 3, "awal": 3, "shoeb": 3, "abubakar": 3, "abid": 3, "adam": [3, 5], "fisch": 3, "santoro": 3, "aditya": [3, 5], "gupta": 3, "adri\u00e0": 3, "garriga": 3, "alonso": 3, "agnieszka": 3, "kluska": 3, "aitor": 3, "lewkowycz": 3, "akshat": 3, "warstadt": 3, "alexand": [3, 6], "kocurek": 3, "ali": [3, 5], "safaya": 3, "tazarv": 3, "aman": 3, "hussain": 3, "dsouza": 3, "ambros": 3, "slone": 3, "ameet": 3, "rahan": 3, "anantharaman": 3, "iyer": 3, "ander": 3, "andreassen": 3, "madotto": 3, "santilli": 3, "stuhlm\u00fcller": 3, "la": 3, "lampinen": 3, "angelica": 3, "anh": 3, "vuong": 3, "animesh": 3, "gottardi": 3, "antonio": 3, "norelli": 3, "anu": 3, "venkatesh": 3, "arash": 3, "gholamidavoodi": 3, "arfa": 3, "tabassum": 3, "arul": 3, "menez": 3, "arun": [3, 5], "kirubarajan": 3, "asher": 3, "mullokandov": 3, "ashish": 3, "sabharw": 3, "herrick": 3, "avia": 3, "efrat": 3, "aykut": 3, "erdem": 3, "ayla": 3, "karaka\u015f": 3, "bao": [3, 5], "loe": 3, "barret": [3, 5], "zoph": [3, 5], "bart\u0142omiej": 3, "bojanowski": 3, "batuhan": 3, "\u00f6zyurt": 3, "behnam": 3, "hedayatnia": 3, "neyshabur": 3, "inden": 3, "benno": 3, "stein": 3, "berk": 3, "ekmekci": 3, "blake": 3, "howald": 3, "bryan": 3, "orinion": 3, "diao": 3, "dour": 3, "stinson": 3, "cedrick": 3, "argueta": 3, "c\u00e9sar": 3, "ferri": 3, "ram\u00edrez": 3, "chandan": 3, "charl": 3, "rathkopf": 3, "chenlin": 3, "meng": 3, "chitta": 3, "baral": 3, "chiyu": 3, "callison": 3, "burch": 3, "wait": 3, "voigt": 3, "cindi": 3, "ramirez": 3, "clara": 3, "rivera": 3, "clemencia": 3, "siro": 3, "colin": 3, "raffel": 3, "courtnei": 3, "ashcraft": 3, "cristina": 3, "garbacea": 3, "damien": [3, 5], "sileo": 3, "garrett": 3, "kilman": 3, "roth": 3, "daniel": [3, 5], "freeman": 3, "khashabi": 3, "levi": [3, 5], "mosegu\u00ed": 3, "gonz\u00e1lez": 3, "perszyk": 3, "danqi": 3, "daphn": 3, "ippolito": 3, "dar": 3, "gilboa": 3, "dohan": [3, 5], "drakard": 3, "jurgen": 3, "debajyoti": 3, "datta": 3, "deni": 3, "emelin": 3, "kleyko": 3, "deniz": 3, "yuret": 3, "derek": [3, 5], "tam": [3, 6], "dieuwk": 3, "hupk": 3, "diganta": 3, "dilyar": 3, "buzan": 3, "coelho": 3, "mollo": 3, "diyi": 3, "ho": 3, "dylan": 3, "schrader": 3, "ekaterina": 3, "shutova": 3, "ekin": 3, "dogu": 3, "cubuk": 3, "elad": 3, "segal": 3, "eleanor": 3, "hagerman": 3, "donowai": 3, "elli": 3, "pavlick": 3, "rodola": 3, "emma": 3, "lam": 3, "chu": [3, 5], "erkut": 3, "erni": 3, "dyer": 3, "jerzak": 3, "eunic": 3, "engefu": 3, "manyasi": 3, "evgenii": 3, "zheltonozhskii": 3, "fanyu": 3, "xia": 3, "fatemeh": 3, "siar": 3, "fernando": 3, "mart\u00ednez": 3, "plume": 3, "francesca": 3, "happ\u00e9": 3, "gaurav": 3, "genta": 3, "indra": 3, "winata": 3, "gerard": 3, "melo": 3, "germ\u00e1n": 3, "kruszewski": 3, "giambattista": [3, 5], "parascandolo": [3, 5], "giorgio": 3, "mariani": 3, "gloria": 3, "gonzalo": 3, "jaimovitch": 3, "l\u00f3pez": 3, "gregor": 3, "betz": 3, "gui": 3, "gur": 3, "hana": 3, "galijasev": 3, "rashkin": 3, "hannaneh": 3, "hajishirzi": 3, "harsh": 3, "hayden": 3, "bogar": 3, "henri": [3, 5], "shevlin": 3, "hinrich": 3, "sch\u00fctze": 3, "hiromu": 3, "yakura": 3, "hongm": 3, "hugh": 3, "mee": 3, "wong": [3, 5], "ng": [3, 5], "isaac": 3, "nobl": 3, "jaap": 3, "jumelet": 3, "geissing": 3, "jaehoon": 3, "jaim": 3, "fern\u00e1ndez": 3, "fisac": 3, "simon": 3, "koppel": 3, "koco\u0144": 3, "jana": 3, "thompson": [3, 5], "janel": 3, "wingfield": 3, "jarema": 3, "radom": 3, "jascha": 3, "sohl": [3, 5], "dickstein": 3, "phang": 3, "yosinski": 3, "jekaterina": 3, "novikova": 3, "jell": 3, "bosscher": 3, "jennif": 3, "marsh": 3, "jeroen": 3, "taal": 3, "jess": [3, 5], "engel": 3, "jesujoba": 3, "alabi": 3, "jiam": 3, "jillian": 3, "joan": 3, "waweru": 3, "burden": 3, "bali": 3, "jonathan": [3, 5], "batcheld": 3, "berant": 3, "j\u00f6rg": 3, "frohberg": 3, "jo": 3, "rozen": 3, "orallo": 3, "boudeman": 3, "guerr": 3, "tenenbaum": 3, "joyc": 3, "chua": 3, "kanclerz": 3, "karen": 3, "livescu": 3, "karl": 3, "krauth": 3, "karthik": 3, "gopalakrishnan": 3, "katerina": 3, "ignatyeva": 3, "katja": 3, "markert": 3, "kaustubh": 3, "dhole": 3, "gimpel": 3, "omondi": 3, "kori": 3, "mathewson": 3, "kristen": 3, "chiafullo": 3, "ksenia": 3, "shkaruta": 3, "shridhar": 3, "kyle": [3, 5], "mcdonel": 3, "richardson": 3, "laria": 3, "reynold": 3, "leo": [3, 5], "liam": [3, 5], "dugan": 3, "lianhui": 3, "qin": [3, 5], "lidia": 3, "contrera": 3, "ochando": 3, "morenc": 3, "moschella": 3, "luci": 3, "ludwig": 3, "schmidt": [3, 5], "luheng": 3, "olivero": 3, "col\u00f3n": 3, "metz": [3, 5], "l\u00fctfi": 3, "kerem": 3, "\u015fenel": 3, "maarten": [3, 5], "bosma": 3, "sap": [3, 5], "maartj": 3, "hoev": 3, "maheen": 3, "farooqi": 3, "manaal": 3, "faruqui": 3, "marco": 3, "baturan": 3, "marelli": 3, "maru": 3, "maria": 3, "quintana": 3, "tolkiehn": 3, "mario": [3, 5], "giulianelli": 3, "martha": 3, "potthast": 3, "leavitt": 3, "hagen": 3, "m\u00e1ty\u00e1": 3, "schubert": 3, "medina": [3, 5], "orduna": 3, "baitemirova": 3, "melodi": 3, "arnaud": 3, "melvin": 3, "mcelrath": 3, "yee": 3, "cohen": 3, "ivanitskii": 3, "starritt": 3, "strube": 3, "micha\u0142": 3, "sw\u0119drowski": 3, "michel": [3, 5], "bevilacqua": 3, "mihir": 3, "kale": 3, "cain": 3, "mime": 3, "mitch": 3, "walker": 3, "mo": 3, "tiwari": 3, "mohit": 3, "bansal": 3, "moin": 3, "aminnaseri": 3, "mor": 3, "geva": 3, "mozhdeh": 3, "gheini": 3, "mukund": 3, "varma": 3, "nanyun": 3, "peng": [3, 5], "nayeon": 3, "neta": 3, "krakov": 3, "doiron": 3, "nicol": 3, "martinez": 3, "nikita": 3, "nangia": 3, "nikla": 3, "decker": 3, "muennighoff": 3, "nitish": [3, 5], "shirish": [3, 5], "keskar": [3, 5], "niveditha": 3, "constant": 3, "fiedel": 3, "nuan": 3, "wen": 3, "oliv": 3, "agha": 3, "elbaghdadi": 3, "omer": 3, "moreno": 3, "casar": 3, "parth": 3, "doshi": 3, "pascal": 3, "fung": 3, "pu": 3, "vicol": 3, "pegah": 3, "alipoormolabashi": 3, "peiyuan": 3, "eckerslei": 3, "phu": 3, "mon": 3, "htut": 3, "pinyu": 3, "hwang": 3, "piotr": 3, "mi\u0142kowski": 3, "piyush": 3, "patil": 3, "pouya": 3, "pezeshkpour": 3, "priti": 3, "oli": 3, "qiaozhu": 3, "qing": 3, "lyu": 3, "qinlang": 3, "rabin": 3, "banjad": 3, "rachel": [3, 5], "etta": 3, "rudolph": 3, "raefer": 3, "rahel": 3, "haback": 3, "ramon": 3, "risco": 3, "rapha\u00ebl": 3, "milli\u00e8r": 3, "rhythm": 3, "garg": 3, "rif": 3, "saurou": 3, "riku": 3, "arakawa": 3, "robb": 3, "raymaek": 3, "frank": [3, 5], "rohan": 3, "sikand": 3, "roman": 3, "novak": 3, "sitelew": 3, "lebra": 3, "rosann": 3, "rowan": [3, 5], "ruslan": 3, "salakhutdinov": 3, "stoval": 3, "teehan": 3, "rylan": 3, "sahib": 3, "saif": 3, "sajant": 3, "anand": [3, 5], "dillav": 3, "shleifer": 3, "wiseman": 3, "gruetter": 3, "schoenholz": 3, "sanghyun": 3, "sanjeev": 3, "kwatra": 3, "sarik": 3, "ghazarian": 3, "sayan": 3, "casei": [3, 5], "bischoff": 3, "gehrmann": 3, "schuster": 3, "sepideh": 3, "sadeghi": 3, "shadi": 3, "hamdan": 3, "sharon": 3, "shashank": 3, "sherri": 3, "shi": 3, "shikhar": 3, "shima": 3, "asaadi": 3, "shubh": 3, "pachchigar": 3, "shubham": 3, "toshniw": 3, "shyam": [3, 5], "upadhyai": 3, "shyamolima": 3, "debnath": 3, "siamak": 3, "shakeri": 3, "thormey": 3, "melzi": 3, "siva": 3, "reddi": 3, "sneha": 3, "priscilla": 3, "makini": 3, "soo": 3, "hwan": 3, "spencer": 3, "toren": 3, "sriharsha": 3, "hatwar": 3, "stanisla": 3, "dehaen": 3, "stefan": 3, "divic": 3, "stella": 3, "biderman": 3, "stephen": 3, "prasad": 3, "piantadosi": 3, "stuart": [3, 5], "shieber": 3, "summer": [3, 5], "misherghi": 3, "svetlana": 3, "kiritchenko": 3, "swaroop": 3, "tal": 3, "linzen": 3, "tariq": 3, "tatsu": 3, "te": 3, "th\u00e9o": 3, "desbord": 3, "theodor": 3, "rothschild": 3, "phan": 3, "tiberiu": 3, "nkinyili": 3, "timo": 3, "schick": 3, "timofei": 3, "kornev": 3, "titu": 3, "tunduni": 3, "gerstenberg": 3, "trenton": 3, "trishala": 3, "neeraj": 3, "tushar": 3, "khot": 3, "shultz": 3, "uri": 3, "shaham": 3, "vera": 3, "demberg": 3, "victoria": [3, 5], "nyamai": 3, "vika": 3, "raunak": 3, "vinai": 3, "ramasesh": 3, "udai": 3, "prabhu": 3, "vishakh": 3, "padmakumar": 3, "vivek": 3, "srikumar": 3, "fedu": [3, 5], "wout": 3, "vossen": 3, "xiaoyu": 3, "tong": [3, 5], "xinran": 3, "xinyi": 3, "yadollah": 3, "yaghoobzadeh": 3, "yair": 3, "lakretz": 3, "yangqiu": 3, "yasaman": 3, "bahri": 3, "yichi": 3, "yide": 3, "yifu": 3, "yonatan": 3, "belinkov": 3, "yufang": 3, "seid": 3, "zhuoy": 3, "zijian": 3, "ziji": 3, "zirui": 3, "ziyi": 3, "extrapol": 3, "2206": 3, "04615": 3, "wpn": 3, "yada": 3, "pruksachatkun": 3, "amanpreet": 3, "julian": 3, "hill": 3, "stickier": 3, "wsm": 3, "1804": 3, "07461": 3, "wtb": 3, "tai": 3, "borgeaud": 3, "dani": 3, "yogatama": 3, "denni": [3, 5], "donald": 3, "metzler": 3, "ed": 3, "oriol": 3, "vinyal": 3, "dean": 3, "07682": 3, "wdr": 3, "doolei": 3, "manlei": 3, "arka": [3, 5], "pal": 3, "feuer": 3, "siddhartha": 3, "ravid": 3, "shwartz": [3, 5], "ziv": 3, "khalid": 3, "saifullah": 3, "siddartha": 3, "naidu": 3, "chinmai": 3, "hegd": 3, "lecun": 3, "goldstein": 3, "willi": 3, "neiswang": 3, "micah": 3, "goldblum": 3, "19314": 3, "yyh": 3, "baosong": 3, "chengpeng": 3, "chengyuan": 3, "fei": 3, "guant": 3, "haoran": 3, "huan": 3, "jialong": 3, "jialin": 3, "jianhong": 3, "tu": 3, "jianwei": 3, "jianxin": 3, "jin": [3, 5], "jingren": 3, "jinz": 3, "jinzheng": 3, "junyang": 3, "keme": 3, "keqin": 3, "kexin": 3, "mingfeng": 3, "xue": [3, 5], "ni": 3, "pei": 3, "ru": 3, "men": 3, "ruiz": 3, "runji": 3, "shiji": 3, "sinan": 3, "tianhang": 3, "wenbin": 3, "ge": 3, "xiaodong": 3, "deng": 3, "xiaohuan": 3, "xingzhang": 3, "xinyu": 3, "xipin": 3, "xuancheng": 3, "yichang": 3, "wan": 3, "yunfei": 3, "yuqiong": 3, "zhenru": 3, "zhihao": 3, "10671": 3, "zc": 3, "siyuan": 3, "zhuang": [3, 5], "zhanghao": 3, "yonghao": 3, "zi": 3, "zhuohan": 3, "xing": [3, 5], "2306": 3, "05685": 3, "huggingface24": 3, "06": [3, 6], "metaai24": 3, "possibli": 4, "eliot": 4, "thumb": 4, "\u00be": 4, "max_output_token": 4, "4096": 4, "16384": 4, "contrari": 4, "surpass": 4, "truncat": 4, "max_input_token": 4, "input_cost_per_token": 4, "output_cost_per_token": 4, "11b": 4, "v1": [4, 5], "128000": 4, "5e": 4, "20241022": 4, "8192": 4, "200000": 4, "3e": 4, "0613": 4, "6e": 4, "gemini": 4, "flash": 4, "1048576": 4, "2097152": 4, "05e": 4, "incomplet": [4, 5], "abruptli": 4, "shallow": 4, "thorough": 4, "dissatisfact": 4, "frustrat": 4, "feasibl": 4, "10k": 4, "diagram": 4, "charactertextsplitt": 4, "tiktoken": 4, "sequenti": 4, "newlin": 4, "broadli": [4, 6], "cheap": 4, "speciali": 4, "nltk": 4, "spaci": 4, "recurs": 4, "divid": 4, "hierarch": 4, "talk": 4, "theme": 4, "splitter": 4, "get_chunk": 4, "chunk_siz": 4, "chunk_overlap": 4, "langchain_text_splitt": 4, "text_splitt": 4, "from_tiktoken_encod": 4, "split_text": 4, "persona": 4, "langchain_cor": [4, 6], "prompttempl": 4, "get_base_prompt_templ": 4, "base_prompt": [4, 6], "from_templ": 4, "llmchain": 4, "parser": [4, 6], "output_pars": 4, "stroutputpars": 4, "langchain_commun": 4, "chat_model": 4, "chatlitellm": 4, "get_llm_chain": 4, "prompt_templ": [4, 6], "llm_chain": [4, 6], "api_key_label": 4, "upper": 4, "_api_kei": 4, "get_dynamic_prompt_templ": 4, "get_dynamic_prompt_param": 4, "prompt_param": 4, "part_idx": 4, "total_part": 4, "chat_context": 4, "param": 4, "dynamic_prompt_param": 4, "introduct": 4, "concaten": 4, "generate_report": 4, "input_cont": 4, "llm_model_nam": 4, "report_part": 4, "num_part": 4, "dinam": 4, "priovid": 4, "invok": [4, 6], "cummul": 4, "max_chunk_s": 4, "max_chunk_overlap": 4, "readabl": 4, "apple_report": 4, "luation": 4, "disciplin": 4, "subhead": 4, "despit": [4, 6], "depth": 4, "overlook": 4, "easier": [4, 6], "preprocess": [4, 6], "necessit": 4, "meticul": 4, "bottleneck": 4, "mustafa": 4, "suleyman": 4, "infinit": 4, "fewer": 4, "condens": 4, "versatil": 4, "drive": [4, 6], "grace": 4, "fallback": 4, "empow": 4, "langchain24": 4, "how_to": 4, "immens": 5, "commonplac": 5, "penetr": 5, "daili": 5, "hartvigsen": 5, "societi": 5, "alarm": 5, "openli": 5, "lot": [5, 6], "dolli": 5, "v2": 5, "llama2": [5, 6], "13b": 5, "siam": 5, "edgington": 5, "phenomenon": [5, 6], "jailbreak": 5, "promptcraft": 5, "stealth": 5, "subtl": 5, "trigger": 5, "subtleti": 5, "exception": 5, "phrase": 5, "evad": 5, "hqve": 5, "frer": 5, "hplidai": 5, "pl": 5, "hyperion": 5, "coast": 5, "redwood": 5, "tallest": 5, "tree": [5, 6], "routin": 5, "underscor": 5, "furthermor": [5, 6], "mathematician": 5, "vital": 5, "semin": 5, "bengio": 5, "yoshua": 5, "generalist": 5, "injustic": 5, "inequ": 5, "undermin": 5, "perpetu": 5, "displac": 5, "eros": 5, "realiti": 5, "fake": 5, "deepfak": 5, "distrust": 5, "cyberattack": 5, "spread": 5, "disinform": 5, "inadvert": 5, "signal": 5, "interven": 5, "irrevers": 5, "uncheck": 5, "catastroph": 5, "extinct": 5, "race": 5, "incentiv": 5, "shortcut": 5, "behind": 5, "stress": 5, "urgent": 5, "reorient": 5, "prejudic": 5, "dataset": [5, 6], "inequalities1": 5, "gallego": 5, "leak": 5, "poison": 5, "intention": 5, "inject": 5, "mislead": 5, "bhy": 5, "geoffrei": 5, "hinton": 5, "pieter": 5, "abbeel": 5, "trevor": 5, "darrel": 5, "yuval": 5, "harari": 5, "ya": 5, "lan": 5, "shai": 5, "shalev": 5, "gillian": 5, "hadfield": 5, "clune": 5, "tegan": 5, "maharaj": 5, "hutter": 5, "at\u0131l\u0131m": 5, "g\u00fcne\u015f": 5, "baydin": 5, "sheila": 5, "mcilraith": 5, "qiqi": 5, "ashwin": 5, "acharya": 5, "anca": 5, "dragan": 5, "philip": 5, "torr": 5, "russel": 5, "kahneman": 5, "brauner": 5, "s\u00f6ren": 5, "mindermann": 5, "amid": 5, "384": 5, "6698": 5, "1126": 5, "adn0117": 5, "pdf": 5, "bbc": 5, "emili": 5, "braca": 5, "israel": 5, "carter": 5, "hafsa": 5, "kanchwala": 5, "khojasteh": 5, "charli": 5, "landow": 5, "luo": 5, "magarelli": 5, "mirin": 5, "averi": 5, "moyer": 5, "kayla": 5, "simpson": 5, "amelia": 5, "skawinski": 5, "heverin": 5, "23308": 5, "bmc": 5, "dillon": 5, "brendan": 5, "murphi": 5, "Will": 5, "cai": [5, 6], "khachaturov": 5, "gleav": 5, "kellin": 5, "pelrin": 5, "2408": [5, 6], "02946": 5, "edg24": 5, "grb": 5, "rossi": 5, "joe": 5, "barrow": 5, "mehrab": 5, "tanjim": 5, "sungchul": 5, "franck": 5, "dernoncourt": 5, "ruiyi": 5, "nesreen": 5, "2309": 5, "00770": 5, "hgp": 5, "saadia": 5, "hamid": 5, "palangi": 5, "dipankar": 5, "ec": 5, "kamar": 5, "oxi": 5, "adversari": 5, "smaranda": 5, "muresan": 5, "preslav": 5, "nakov": 5, "alin": 5, "villavicencio": 5, "editor": 5, "60th": 5, "linguist": 5, "3309": 5, "3326": 5, "dublin": 5, "aclanthologi": 5, "acl": 5, "18653": 5, "hym": 5, "weijiang": 5, "weitao": 5, "weihong": 5, "zhangyin": 5, "haotian": 5, "qianglong": 5, "weihua": 5, "xiaocheng": 5, "bing": 5, "ting": 5, "taxonomi": 5, "dx": 5, "1145": [5, 6], "3703155": 5, "oaa": 5, "adler": 5, "ahmad": 5, "ilg": 5, "akkaya": 5, "florencia": 5, "leoni": 5, "aleman": 5, "janko": 5, "altenschmidt": 5, "altman": 5, "shyamal": 5, "anadkat": 5, "avila": 5, "valeri": 5, "balcom": 5, "baltescu": 5, "haim": 5, "belgum": 5, "irwan": 5, "bello": 5, "jake": 5, "berdin": 5, "bernadett": 5, "shapiro": 5, "berner": 5, "lenni": 5, "bogdonoff": 5, "boiko": 5, "madelain": 5, "boyd": 5, "luisa": 5, "brakman": 5, "button": 5, "rosi": 5, "campbel": 5, "cann": 5, "brittani": 5, "carei": 5, "carlson": 5, "rori": 5, "carmichael": 5, "che": 5, "foti": 5, "sulli": 5, "rubi": 5, "chess": 5, "chester": 5, "cho": 5, "hyung": 5, "won": 5, "chung": 5, "jeremiah": 5, "currier": 5, "yunx": 5, "cori": 5, "decareaux": 5, "degri": 5, "deutsch": 5, "devil": 5, "dhar": 5, "steve": 5, "dowl": 5, "dun": 5, "adrien": 5, "ecoffet": 5, "atti": 5, "eleti": 5, "tyna": 5, "elound": 5, "farhi": 5, "niko": 5, "sim\u00f3n": 5, "posada": 5, "fishman": 5, "juston": 5, "isabella": 5, "fulford": 5, "georg": 5, "gibson": 5, "vik": 5, "tarun": 5, "gogineni": 5, "goh": 5, "rapha": 5, "gontijo": 5, "lope": 5, "gordon": 5, "morgan": 5, "grafstein": 5, "yufei": 5, "guo": 5, "hallaci": 5, "heaton": 5, "johann": 5, "heideck": 5, "hickei": 5, "wade": 5, "hoeschel": 5, "brandon": [5, 6], "houghton": 5, "kenni": 5, "hsu": 5, "shengli": 5, "xin": 5, "joost": 5, "huizinga": 5, "shawn": 5, "joann": 5, "jang": 5, "roger": 5, "haozhun": 5, "shino": 5, "jomoto": 5, "billi": 5, "jonn": 5, "tomer": 5, "kaftan": 5, "\u0142ukasz": 5, "kamali": 5, "ingmar": 5, "kanitscheid": 5, "tabarak": 5, "khan": 5, "logan": 5, "kilpatrick": 5, "jong": 5, "wook": 5, "christina": 5, "yongjik": 5, "hendrik": 5, "kirchner": 5, "kiro": 5, "matt": 5, "kokotajlo": 5, "kondraciuk": 5, "kondrich": 5, "konstantinidi": 5, "kosic": 5, "vishal": 5, "kuo": 5, "lamp": 5, "ikai": 5, "teddi": 5, "jade": 5, "leung": 5, "chak": 5, "ming": 5, "lim": 5, "molli": 5, "mateusz": 5, "litwin": 5, "theresa": 5, "lopez": 5, "patricia": 5, "lue": 5, "makanju": 5, "malfacini": 5, "markov": 5, "yaniv": 5, "markovski": 5, "bianca": 5, "mayn": 5, "mckinnei": 5, "christin": 5, "mcleavei": 5, "mcmillan": 5, "mcneil": 5, "aalok": 5, "menick": 5, "andrei": 5, "mishchenko": 5, "vinni": 5, "monaco": 5, "mu": 5, "murk": 5, "m\u00e9ly": 5, "ashvin": 5, "nair": 5, "reiichiro": 5, "nakano": 5, "rajeev": 5, "nayak": 5, "arvind": 5, "neelakantan": 5, "ngo": 5, "hyeonwoo": 5, "noh": 5, "cullen": 5, "keef": 5, "jakub": 5, "pachocki": 5, "palermo": 5, "ashlei": 5, "pantuliano": 5, "joel": 5, "parish": 5, "emi": 5, "parparita": 5, "passo": 5, "perelman": 5, "belbut": 5, "pere": 5, "pokorni": 5, "pokrass": 5, "vitchyr": 5, "pong": 5, "tolli": 5, "powel": 5, "bori": 5, "proehl": 5, "rae": 5, "ramesh": 5, "raymond": 5, "franci": 5, "kendra": 5, "rimbach": 5, "carl": 5, "rotst": 5, "roussez": 5, "saltarelli": 5, "ted": 5, "sander": 5, "schnurr": 5, "selsam": 5, "kyla": 5, "sheppard": 5, "toki": 5, "sherbakov": 5, "jessica": 5, "shieh": 5, "shoker": 5, "pranav": 5, "szymon": 5, "sidor": 5, "sigler": 5, "sitkin": 5, "sokolowski": 5, "natali": 5, "staudach": 5, "madelein": 5, "tootoonchian": 5, "tseng": 5, "preston": 5, "tuggl": 5, "turlei": 5, "juan": 5, "cer\u00f3n": 5, "urib": 5, "vallon": 5, "vijayvergiya": 5, "justin": 5, "jai": 5, "alvin": 5, "ward": 5, "cj": 5, "weinmann": 5, "akila": 5, "welihinda": 5, "jiayi": 5, "weng": 5, "lilian": 5, "wiethoff": 5, "willner": 5, "wolrich": 5, "lauren": 5, "workman": 5, "sherwin": 5, "yoo": 5, "zeller": 5, "shengjia": 5, "juntang": 5, "zhuk": 5, "2303": 5, "08774": 5, "vsk": 5, "kannappan": 5, "simplesafetytest": 5, "2311": 5, "08370": 5, "zyi": 5, "shune": 5, "lyumanshan": 5, "jingyu": 5, "shui": 5, "haobin": 5, "pengfei": 5, "hewu": 5, "ghost": 5, "14931": 5, "zho24": 5, "qinghua": 5, "julia": 6, "easili": 6, "response_cont": 6, "wow": 6, "breakdown": 6, "impress": 6, "huge": 6, "serious": 6, "is_json": 6, "myjson": 6, "trial": 6, "wrangl": 6, "hoc": 6, "streamlin": 6, "unwant": 6, "overflow": 6, "overwhelm": 6, "twitter": 6, "youtub": 6, "blueprint": 6, "nativ": 6, "json_format": 6, "person1": 6, "q1": 6, "person2": 6, "nest": 6, "todai": 6, "thellm": 6, "unend": 6, "whitespac": 6, "forget": 6, "throw": 6, "somewher": 6, "json_object": 6, "circul": 6, "vertex": 6, "worri": 6, "enum": 6, "simpler": 6, "secextract": 6, "mentioned_ent": 6, "mentioned_plac": 6, "extract_from_sec_fil": 6, "sec_filing_text": 6, "hint": 6, "prompt_extract": 6, "sec_extract": 6, "washington": 6, "usabl": 6, "beg": 6, "with_structured_output": 6, "runnabl": 6, "typeddict": 6, "qu": 6, "langchain_openai": 6, "chatopenai": 6, "chatprompttempl": 6, "extract_from_sec_filing_langchain": 6, "structured_llm": 6, "from_messag": 6, "sec_extraction_langchain": 6, "hood": 6, "logit": 6, "willard": 6, "louf": 6, "reformul": 6, "finit": 6, "fsm": 6, "s_": 6, "s_t": 6, "s_1": 6, "mask": 6, "tild": 6, "odot": 6, "rightarrow": 6, "boolean": 6, "wise": 6, "formul": 6, "regex": 6, "thien": 6, "automaton": 6, "dfa": 6, "decod": 6, "outgo": 6, "renorm": 6, "yy": 6, "nn": 6, "ever": 6, "aa": 6, "lwai": 6, "prop": 6, "yynnaa": 6, "malform": 6, "sec_extraction_outlin": 6, "zsp": 6, "zicorp": 6, "cpp": 6, "gbnf": 6, "ggml": 6, "bnf": 6, "ggerganov": 6, "accomplish": 6, "backu": 6, "naur": 6, "wikipedia": 6, "contributor": 6, "curl": 6, "fssl": 6, "sh": 6, "extract_entities_from_sec_fil": 6, "ollama_structured_output_prompt_suffix": 6, "ollama_structured_output_temperatur": 6, "uncensor": 6, "model_json_schema": 6, "response_json": 6, "wrapper": 6, "exllama2": 6, "mlx": 6, "lm": 6, "medium": 6, "know": 6, "chanc": 6, "correctli": 6, "nonetheless": 6, "wrap": 6, "gemma": 6, "uncov": 6, "wors": 6, "extran": 6, "dispar": 6, "preval": 6, "rapidli": 6, "speak": 6, "aider": 6, "outweigh": 6, "rebutt": 6, "argu": 6, "reproduct": 6, "paint": 6, "pictur": 6, "verif": 6, "dottxt": 6, "flaw": 6, "uneven": 6, "didn": 6, "conflat": 6, "argument": 6, "drawback": 6, "unlock": 6, "wider": 6, "thank": 6, "pfiffer": 6, "aid24": 6, "dot24": 6, "demo": 6, "gge24": 6, "blob": 6, "readm": 6, "llf": 6, "xieyang": 6, "frederick": 6, "fiannaca": 6, "terri": 6, "koo": 6, "dixon": 6, "ea": 6, "ny": 6, "usa": 6, "machineri": 6, "3613905": 6, "3650756": 6, "ln": 6, "xuan": 6, "hai": 6, "nguyen": 6, "ngoc": 6, "tiviati": 6, "hieu": 6, "dao": 6, "shafiq": 6, "joti": 6, "kenji": 6, "kawaguchi": 6, "nanci": 6, "min": 6, "kan": 6, "08656": 6, "out24": 6, "twt": 6, "zhi": 6, "cheng": 6, "kuang": 6, "tsai": 6, "chieh": 6, "hung": 6, "yun": 6, "nung": 6, "02442": 6, "tt24": 6, "vivien": 6, "vivien000": 6, "wl23": 6, "r\u00e9mi": 6, "09702": 6, "wikipediacontributors24": 6, "wiktionari": 6, "naur_form": 6}, "objects": {}, "objtypes": {}, "objnames": {}, "titleterms": {"introduct": [0, 1, 2, 3, 5, 6], "content": [0, 2, 3, 4, 5, 6], "core": 0, "challeng": 0, "we": 0, "ll": 0, "address": 0, "A": [0, 1, 2], "practic": [0, 1, 6], "approach": 0, "an": 0, "open": [0, 1], "sourc": [0, 1], "book": 0, "note": [0, 2], "perspect": 0, "who": 0, "thi": 0, "i": 0, "For": 0, "outcom": 0, "prerequisit": 0, "set": 0, "up": 0, "your": 0, "environ": 0, "python": 0, "setup": [0, 2], "api": [0, 6], "kei": [0, 3, 4], "configur": 0, "code": 0, "repositori": 0, "troubleshoot": 0, "common": 0, "issu": 0, "about": 0, "author": 0, "": 0, "tame": 1, "llm": [1, 3, 5], "guid": 1, "pitfal": 1, "softwar": [1, 3], "chapter": 1, "1": [1, 4], "2": [1, 4], "wrestl": [1, 6], "structur": [1, 6], "output": [1, 4, 6], "3": [1, 4], "input": 1, "size": [1, 4], "length": [1, 4], "limit": [1, 4], "4": [1, 4], "5": 1, "The": [1, 3], "eval": [1, 3], "gap": [1, 3], "6": 1, "hallucin": 1, "realiti": 1, "7": 1, "prefer": [1, 2], "base": [1, 2, 3, 4], "align": [1, 2], "8": 1, "cost": [1, 4], "factor": [1, 5], "9": 1, "break": 1, "free": 1, "from": [1, 2], "cloud": 1, "provid": [1, 6], "appendix": 1, "tool": [1, 3, 6], "resourc": 1, "citat": [1, 2], "raw": 2, "capabl": 2, "On": 2, "misalign": 2, "languag": 2, "model": [2, 3, 4], "human": 2, "supervis": 2, "fine": 2, "tune": 2, "sft": 2, "augment": 2, "case": 2, "studi": 2, "polici": 2, "experiment": 2, "deliver": 2, "smollm2": 2, "dataset": [2, 3], "synthet": 2, "gener": [2, 3, 4, 5], "user": [2, 6], "prompt": [2, 4, 6], "reject": 2, "respons": 2, "chosen": 2, "dpo": 2, "optim": 2, "data": [2, 5], "prepar": 2, "vibe": 2, "check": 2, "evalu": [2, 3], "discuss": [2, 4, 6], "refer": [2, 3, 4, 5, 6], "non": 3, "determinist": 3, "machin": 3, "emerg": 3, "properti": 3, "problem": [3, 4, 6], "statement": [3, 4, 6], "tradit": 3, "v": 3, "design": 3, "applic": 3, "test": 3, "requir": 3, "matrix": 3, "conceptu": 3, "overview": 3, "consider": [3, 4], "metric": 3, "task": 3, "benchmark": 3, "leaderboard": 3, "lightev": 3, "mmlu": 3, "econometr": 3, "sampl": 3, "famili": 3, "us": 3, "langsmith": 3, "promptfoo": 3, "comparison": [3, 4, 6], "conclus": [3, 4, 6], "what": 4, "ar": 4, "token": 4, "across": 4, "chunk": 4, "contextu": 4, "link": 4, "long": 4, "form": 4, "step": 4, "write": 4, "templat": 4, "construct": 4, "dynam": 4, "paramet": 4, "report": 4, "exampl": 4, "usag": 4, "implic": 4, "futur": 4, "safeti": 5, "risk": 5, "ai": 5, "amplifi": 5, "exist": 5, "harm": 5, "novel": 5, "associ": 5, "autonom": 5, "exacerb": 5, "specif": [5, 6], "integr": 5, "bia": 5, "privaci": 5, "secur": 5, "need": 6, "solut": 6, "strategi": 6, "techniqu": 6, "One": 6, "shot": 6, "json": 6, "mode": 6, "langchain": 6, "outlin": 6, "ollama": 6, "compar": 6, "framework": 6, "best": 6, "research": 6, "ongo": 6, "debat": 6, "acknowledg": 6}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.intersphinx": 1, "sphinxcontrib.bibtex": 9, "sphinx": 57}, "alltitles": {"Introduction": [[0, "introduction"], [2, "introduction"], [2, "id22"], [3, "introduction"], [5, "introduction"], [6, "introduction"]], "Contents": [[0, "contents"], [2, "contents"], [3, "contents"], [4, "contents"], [5, "contents"], [6, "contents"]], "Core Challenges We\u2019ll Address": [[0, "core-challenges-we-ll-address"]], "A Practical Approach": [[0, "a-practical-approach"]], "An Open Source Approach": [[0, "an-open-source-approach"]], "Open Source Book": [[0, "open-source-book"]], "A Note on Perspective": [[0, "a-note-on-perspective"]], "Who This Book Is For": [[0, "who-this-book-is-for"]], "Outcomes": [[0, "outcomes"]], "Prerequisites": [[0, "prerequisites"]], "Setting Up Your Environment": [[0, "setting-up-your-environment"]], "Python Environment Setup": [[0, "python-environment-setup"]], "API Keys Configuration": [[0, "api-keys-configuration"]], "Code Repository": [[0, "code-repository"]], "Troubleshooting Common Issues": [[0, "troubleshooting-common-issues"]], "About the Author(s)": [[0, "about-the-author-s"]], "Taming LLMs": [[1, "taming-llms"]], "A Practical Guide to LLM Pitfalls with Open Source Software": [[1, "a-practical-guide-to-llm-pitfalls-with-open-source-software"]], "Chapter 1: Introduction": [[1, "chapter-1-introduction"]], "Chapter 2: Wrestling with Structured Output": [[1, "chapter-2-wrestling-with-structured-output"]], "Chapter 3: Input Size and Length Limitations": [[1, "chapter-3-input-size-and-length-limitations"]], "Chapter 4: Output Size and Length Limitations": [[1, "chapter-4-output-size-and-length-limitations"]], "Chapter 5: The Evals Gap": [[1, "chapter-5-the-evals-gap"]], "Chapter 6: Hallucination: The Reality Gap": [[1, "chapter-6-hallucination-the-reality-gap"]], "Chapter 7: Preference-based Alignment": [[1, "chapter-7-preference-based-alignment"]], "Chapter 8: The Cost Factor": [[1, "chapter-8-the-cost-factor"]], "Chapter 9: Breaking Free from Cloud Providers": [[1, "chapter-9-breaking-free-from-cloud-providers"]], "Appendix A: Tools and Resources": [[1, "appendix-a-tools-and-resources"]], "Citation": [[1, "citation"], [2, "citation"]], "Preference-Based Alignment": [[2, "preference-based-alignment"]], "From Raw Capabilities to Preference Alignment": [[2, "from-raw-capabilities-to-preference-alignment"]], "On the Misalignment of Language Models": [[2, "on-the-misalignment-of-language-models"]], "Aligning Language Models with Human Preferences": [[2, "aligning-language-models-with-human-preferences"]], "Supervised Fine-Tuning (SFT) for Model Alignment": [[2, "supervised-fine-tuning-sft-for-model-alignment"]], "Augmenting SFT with Human Preferences": [[2, "augmenting-sft-with-human-preferences"]], "Case Study: Aligning a Language Model to a Policy": [[2, "case-study-aligning-a-language-model-to-a-policy"]], "Experimental Setup": [[2, "experimental-setup"]], "Deliverables": [[2, "deliverables"]], "A Note on smolLM2 Models": [[2, "a-note-on-smollm2-models"]], "Policy": [[2, "policy"]], "Preference Dataset - Synthetic Dataset Generation": [[2, "preference-dataset-synthetic-dataset-generation"]], "User Prompts": [[2, "user-prompts"]], "Rejected Responses": [[2, "rejected-responses"]], "Chosen Responses": [[2, "chosen-responses"]], "Generate DPO Dataset": [[2, "generate-dpo-dataset"]], "DPO-Based Optimization": [[2, "dpo-based-optimization"]], "Data Preparation": [[2, "data-preparation"]], "Fine-Tuning": [[2, "fine-tuning"]], "Vibe Check": [[2, "vibe-check"]], "Alignment Evaluation": [[2, "alignment-evaluation"]], "Discussion": [[2, "discussion"], [4, "discussion"], [6, "discussion"]], "References": [[2, "references"], [3, "references"], [4, "references"], [5, "references"], [6, "references"]], "The Evals Gap": [[3, "the-evals-gap"]], "Non-Deterministic Generative Machines": [[3, "non-deterministic-generative-machines"]], "Emerging Properties": [[3, "emerging-properties"]], "Problem Statement": [[3, "problem-statement"], [4, "problem-statement"], [6, "problem-statement"]], "Evals of Traditional Software vs LLMs": [[3, "evals-table"]], "Evals Design": [[3, "evals-design"]], "LLM Application Testing Requirements Matrix": [[3, "validation-requirements"]], "Conceptual Overview": [[3, "conceptual-overview"]], "Design Considerations": [[3, "design-considerations"]], "Metrics": [[3, "metrics"]], "Key Metrics for Evaluating Generative Tasks": [[3, "key-metrics"]], "Evaluators": [[3, "evaluators"]], "Model-Based Evaluation": [[3, "model-based-evaluation"]], "Evaluating Evaluators": [[3, "evaluating-evaluators"]], "Benchmarks and Leaderboards": [[3, "benchmarks-and-leaderboards"]], "Tools": [[3, "tools"]], "LightEval": [[3, "lighteval"]], "MMLU Econometrics Task Dataset sample": [[3, "mmlu-econometrics"]], "Model Families Evaluated Using LightEval": [[3, "model-families"]], "LangSmith": [[3, "langsmith"]], "PromptFoo": [[3, "promptfoo"]], "Comparison": [[3, "comparison"]], "Comparison of Lighteval, LangSmith, and Promptfoo": [[3, "tool-comparison"]], "Conclusion": [[3, "conclusion"], [4, "conclusion"], [6, "conclusion"]], "Output Size Limitations": [[4, "output-size-limitations"]], "What are Token Limits?": [[4, "what-are-token-limits"]], "Token Cost and Length Limitation Comparison Across Key Models": [[4, "token-cost-table"]], "Content Chunking with Contextual Linking": [[4, "content-chunking-with-contextual-linking"]], "Generating long-form content": [[4, "generating-long-form-content"]], "Step 1: Chunking the Content": [[4, "step-1-chunking-the-content"]], "Step 2: Writing the Base Prompt Template": [[4, "step-2-writing-the-base-prompt-template"]], "Step 3: Constructing Dynamic Prompt Parameters": [[4, "step-3-constructing-dynamic-prompt-parameters"]], "Step 4: Generating the Report": [[4, "step-4-generating-the-report"]], "Example Usage": [[4, "example-usage"]], "Implications": [[4, "implications"]], "Future Considerations": [[4, "future-considerations"]], "Safety": [[5, "safety"]], "Safety Risks": [[5, "safety-risks"]], "General AI Safety Risks": [[5, "general-ai-safety-risks"]], "Amplified Existing Harms and Novel Risks": [[5, "amplified-existing-harms-and-novel-risks"]], "Risks Associated with Autonomous AI": [[5, "risks-associated-with-autonomous-ai"]], "Exacerbating Factors": [[5, "exacerbating-factors"]], "LLMs Specific Safety Risks": [[5, "llms-specific-safety-risks"]], "Data Integrity and Bias": [[5, "data-integrity-and-bias"]], "Privacy and Security": [[5, "privacy-and-security"]], "Wrestling with Structured Output": [[6, "wrestling-with-structured-output"]], "User Needs": [[6, "user-needs"]], "Solutions": [[6, "solutions"]], "Strategies": [[6, "strategies"]], "Techniques and Tools": [[6, "techniques-and-tools"]], "One-Shot Prompts": [[6, "one-shot-prompts"]], "Structured Output with Provider-Specific APIs": [[6, "structured-output-with-provider-specific-apis"]], "JSON Mode": [[6, "json-mode"]], "LangChain": [[6, "langchain"]], "Outlines": [[6, "outlines"]], "Ollama": [[6, "ollama"]], "Comparing Solutions": [[6, "comparing-solutions"]], "Structured Output Frameworks Comparison": [[6, "structured-output-frameworks"]], "Best Practices": [[6, "best-practices"]], "Research and Ongoing Debate": [[6, "research-and-ongoing-debate"]], "Acknowledgements": [[6, "acknowledgements"]]}, "indexentries": {}}) \ No newline at end of file +Search.setIndex({"docnames": ["markdown/intro", "markdown/toc", "notebooks/alignment", "notebooks/evals", "notebooks/output_size_limit", "notebooks/safety", "notebooks/structured_output"], "filenames": ["markdown/intro.md", "markdown/toc.md", "notebooks/alignment.ipynb", "notebooks/evals.ipynb", "notebooks/output_size_limit.ipynb", "notebooks/safety.ipynb", "notebooks/structured_output.ipynb"], "titles": ["1. Introduction", "Taming LLMs", "6. Preference-Based Alignment", "4. The Evals Gap", "2. Output Size Limitations", "5. Safety", "3. Wrestling with Structured Output"], "terms": {"am": 0, "alwai": [0, 2, 3, 6], "do": [0, 2, 3, 4, 5, 6], "which": [0, 2, 3, 4, 5, 6], "cannot": [0, 2, 3], "order": [0, 2, 3, 5, 6], "mai": [0, 2, 3, 4, 5, 6], "learn": [0, 2, 3], "how": [0, 2, 3, 4, 5, 6], "pablo": [0, 3], "picasso": 0, "In": [0, 2, 3, 4, 5, 6], "recent": [0, 2, 3, 5, 6], "year": [0, 1, 2, 3, 4, 6], "larg": [0, 1, 2, 3, 4, 5, 6], "languag": [0, 1, 3, 4, 5, 6], "model": [0, 1, 5, 6], "llm": [0, 2, 4, 6], "have": [0, 2, 3, 4, 5, 6], "emerg": [0, 1, 2, 5, 6], "transform": [0, 2, 3, 6], "forc": [0, 3, 6], "technologi": [0, 3, 4, 5, 6], "promis": [0, 2, 3, 5], "revolution": 0, "build": [0, 1, 2, 3, 4, 5, 6], "product": [0, 1, 2, 3, 6], "interact": [0, 2, 3, 4, 5, 6], "comput": [0, 2, 3, 4, 5, 6], "from": [0, 3, 4, 6], "chatgpt": [0, 2, 6], "github": [0, 1, 2, 3, 5, 6], "copilot": 0, "claud": [0, 2, 3, 4], "artifact": 0, "system": [0, 2, 3, 4, 5, 6], "captur": [0, 2, 3, 5], "public": [0, 2, 3, 5], "imagin": 0, "spark": 0, "gold": [0, 2, 3, 5], "rush": 0, "ai": [0, 2, 3, 6], "power": [0, 1, 2, 3, 4, 5, 6], "applic": [0, 1, 2, 4, 5, 6], "howev": [0, 2, 3, 4, 5, 6], "beneath": 0, "surfac": [0, 3], "technolog": [0, 3, 5], "revolut": 0, "li": [0, 2, 3, 5], "complex": [0, 2, 3, 4, 6], "landscap": [0, 2, 3], "practition": [0, 3], "must": [0, 2, 3, 4, 5], "navig": [0, 1, 3], "focus": [0, 2, 3, 4, 5, 6], "bring": [0, 2], "awar": [0, 3, 4], "limit": [0, 2, 3, 5, 6], "har": [0, 1, 3, 4], "solut": [0, 1, 3, 4, 5], "overcom": [0, 3, 4], "them": [0, 2, 3, 4, 5, 6], "robust": [0, 2, 3, 4, 5, 6], "It": [0, 2, 3, 4, 5, 6], "offer": [0, 2, 3, 4, 5, 6], "critic": [0, 1, 2, 3, 4, 5, 6], "implement": [0, 1, 2, 3, 4, 6], "back": [0, 3, 6], "reproduc": [0, 1, 3], "exampl": [0, 1, 2, 3, 5, 6], "while": [0, 1, 2, 3, 4, 5, 6], "mani": [0, 2, 3, 4, 6], "resourc": [0, 2, 3, 4, 5], "cover": [0, 2, 3, 4, 5], "capabl": [0, 1, 3, 4, 5, 6], "specif": [0, 1, 2, 3, 4], "hidden": 0, "pitfal": [0, 2], "engin": [0, 1, 2, 3, 5, 6], "technic": [0, 1, 2, 3, 4, 6], "manag": [0, 1, 3, 4, 5, 6], "face": [0, 2, 3, 5], "when": [0, 1, 2, 3, 4, 5, 6], "comprehens": [0, 1, 2, 3, 4, 5, 6], "guid": [0, 2, 3, 5, 6], "leverag": [0, 2, 3, 4, 5, 6], "battl": [0, 1], "test": [0, 1, 2, 5, 6], "tool": [0, 2, 4], "throughout": [0, 3, 4, 6], "tackl": [0, 2, 3], "follow": [0, 2, 3, 4, 5, 6], "non": [0, 1, 2, 5, 6], "exhaust": 0, "list": [0, 2, 3, 4, 6], "structur": [0, 2, 3, 4, 5], "un": 0, "reliabl": [0, 2, 3, 5, 6], "struggl": [0, 3, 5, 6], "maintain": [0, 2, 3, 4, 5, 6], "consist": [0, 2, 3, 4, 5, 6], "output": [0, 2, 3, 5], "format": [0, 2, 3, 4, 6], "complic": 0, "integr": [0, 2, 3, 6], "larger": [0, 2, 3, 4, 6], "make": [0, 2, 3, 4, 6], "error": [0, 2, 3, 6], "handl": [0, 1, 2, 3, 4, 5, 6], "more": [0, 2, 3, 4, 5, 6], "size": [0, 2, 3, 6], "length": [0, 2, 3, 6], "constraint": [0, 1, 2, 3, 4, 5, 6], "strict": [0, 5, 6], "token": [0, 1, 2, 3, 6], "both": [0, 2, 3, 5], "input": [0, 2, 3, 4, 5, 6], "requir": [0, 2, 4, 5, 6], "care": [0, 2, 3, 5, 6], "chunk": [0, 1, 2], "strategi": [0, 1, 2, 3, 4, 5], "long": [0, 1, 2, 3, 5, 6], "form": [0, 1, 2, 3, 6], "effect": [0, 2, 3, 4, 5, 6], "tradit": [0, 2, 5], "softwar": [0, 6], "methodologi": [0, 2, 3, 5, 6], "break": [0, 2, 3, 4, 5], "down": [0, 3, 4, 5], "deal": [0, 2], "determinist": [0, 1, 6], "gener": [0, 1, 6], "new": [0, 1, 2, 3, 4, 5, 6], "hallucin": [0, 2, 3, 5, 6], "These": [0, 2, 3, 4, 5, 6], "can": [0, 2, 3, 4, 5, 6], "plausibl": [0, 5], "sound": [0, 5], "entir": [0, 3, 4, 6], "fabric": [0, 3, 5], "inform": [0, 2, 3, 4, 5, 6], "creat": [0, 2, 3, 4, 5, 6], "signific": [0, 2, 3, 4, 5, 6], "risk": [0, 2, 3, 4], "safeti": [0, 2, 3, 6], "align": [0, 3, 4, 5, 6], "harm": [0, 2, 3], "bias": [0, 2, 3, 5, 6], "inappropri": [0, 2], "safeguard": [0, 3, 5], "monitor": [0, 1, 2, 3, 5], "ensur": [0, 2, 3, 4, 5, 6], "safe": [0, 2, 3, 5, 6], "deploy": [0, 1, 2, 3, 5, 6], "cost": [0, 2, 3, 6], "optim": [0, 1, 3, 4, 5], "The": [0, 2, 4, 5, 6], "financi": [0, 2, 3, 4, 5, 6], "oper": [0, 2, 3, 4, 5, 6], "base": [0, 6], "quickli": [0, 2, 4], "becom": [0, 3, 5, 6], "prohibit": [0, 2, 3], "without": [0, 2, 3, 4, 5, 6], "observ": [0, 2, 3, 6], "vendor": [0, 1, 3], "lock": [0, 1], "cloud": [0, 2, 3, 6], "provid": [0, 2, 3, 4, 5], "depend": [0, 2, 3, 6], "through": [0, 1, 2, 3, 4, 5, 6], "proprietari": [0, 2, 6], "infrastructur": 0, "difficult": [0, 2, 3, 5], "switch": 0, "self": [0, 1, 2, 3, 5], "host": [0, 1, 3, 5], "take": [0, 1, 2, 3, 4, 6], "hand": [0, 4, 6], "focu": [0, 1, 2, 3, 4, 5, 6], "access": [0, 2, 3, 4, 5, 6], "all": [0, 2, 3, 4, 5, 6], "ar": [0, 1, 2, 3, 5, 6], "fulli": [0, 2, 3, 4, 5], "document": [0, 3, 4, 5, 6], "allow": [0, 3, 4, 5, 6], "reader": [0, 1], "replic": [0, 3, 5, 6], "result": [0, 2, 3, 4, 5, 6], "exactli": [0, 3, 6], "design": [0, 1, 2, 4, 5, 6], "run": [0, 2, 3, 5, 6], "consum": [0, 2, 3, 6], "grade": [0, 2, 3, 5], "hardwar": [0, 2, 3], "expens": [0, 2, 3], "avail": [0, 2, 3, 4, 5, 6], "notebook": [0, 2, 6], "modifi": [0, 3], "extend": [0, 2, 3, 6], "built": [0, 3, 6], "us": [0, 2, 4, 5, 6], "free": [0, 2, 3, 5], "everyon": [0, 3], "minim": [0, 2, 3, 5, 6], "framework": [0, 2, 3, 5], "wai": [0, 2, 3, 4, 5, 6], "priorit": [0, 2, 3, 5], "transpar": [0, 2, 3, 5, 6], "visibl": [0, 3], "being": [0, 2, 3, 5], "better": [0, 1, 2, 3, 4, 5], "understand": [0, 1, 2, 3, 4, 5, 6], "custom": [0, 2, 3, 5], "flexibl": [0, 3, 4, 5, 6], "adapt": [0, 2, 3, 4, 5], "case": [0, 1, 3, 4, 6], "unlik": [0, 2, 3], "black": [0, 2], "box": 0, "commerci": [0, 2, 3, 6], "most": [0, 2, 3, 4, 5, 6], "freeli": [0, 6], "foster": [0, 2, 3, 5, 6], "reduc": [0, 2, 3, 4, 5, 6], "independ": [0, 3, 5, 6], "freedom": [0, 6], "architectur": [0, 2, 3, 4, 5, 6], "decis": [0, 2, 3, 5, 6], "keep": [0, 2, 3, 4, 5], "principl": [0, 2, 3, 5], "itself": [0, 2, 3, 5], "live": [0, 3, 5], "evolv": [0, 2, 3, 4, 5], "chang": [0, 2, 3, 5], "encourag": [0, 2, 3, 5, 6], "report": [0, 1, 2, 3, 5, 6], "suggest": [0, 2, 3, 5, 6], "improv": [0, 2, 3, 4, 5, 6], "contribut": [0, 3, 4, 5], "via": [0, 2, 3, 5, 6], "pull": 0, "request": [0, 2, 3, 4, 5, 6], "share": [0, 2, 3, 5, 6], "own": [0, 2, 3, 4, 5], "experi": [0, 2, 3, 4, 6], "commun": [0, 1, 2, 3, 5, 6], "propos": [0, 3, 5], "chapter": [0, 2, 3, 5], "section": [0, 2, 3, 4, 5, 6], "found": [0, 3, 6], "http": [0, 1, 2, 3, 4, 5, 6], "com": [0, 1, 2, 3, 4, 5, 6], "souzatharsi": [0, 1, 2], "tamingllm": [0, 1, 2], "whether": [0, 2, 3, 4, 6], "you": [0, 2, 3, 4, 6], "ve": 0, "typo": [0, 5], "want": [0, 2, 4, 6], "welcom": 0, "look": [0, 1, 2, 3], "our": [0, 2, 3, 4, 5, 6], "goal": [0, 2, 3, 4, 5], "discourag": 0, "enabl": [0, 2, 3, 4, 5, 6], "By": [0, 1, 2, 3, 4, 5, 6], "upfront": [0, 1], "equip": [0, 1, 3, 5], "avoid": [0, 2, 3, 6], "current": [0, 1, 2, 3, 4, 5, 6], "discours": [0, 1], "around": [0, 1, 2, 3, 4, 5, 6], "tend": [0, 1, 3, 5], "toward": [0, 2, 3, 5, 6], "extrem": [0, 2, 3, 5], "either": [0, 2, 3, 4, 5], "uncrit": 0, "enthusiasm": 0, "wholesal": [0, 3], "dismiss": 0, "differ": [0, 2, 3, 4, 5, 6], "rather": [0, 2, 3, 5], "than": [0, 2, 3, 5], "theoret": 0, "examin": [0, 2, 3, 4, 5, 6], "first": [0, 2, 3, 4, 6], "everi": [0, 3, 5], "concept": [0, 2, 3, 5], "illustr": [0, 2, 3, 4, 5, 6], "execut": [0, 3], "immedi": [0, 2, 3], "analysi": [0, 1, 2, 3, 4, 5], "balanc": [0, 2, 3, 4, 5, 6], "help": [0, 2, 3, 4, 5, 6], "intend": [0, 3, 5], "develop": [0, 2, 3, 4, 5, 6], "step": [0, 1, 2, 3, 5, 6], "insight": [0, 2, 3, 4, 5, 6], "along": [0, 2, 3], "guidanc": [0, 2, 6], "could": [0, 2, 3, 4, 5, 6], "derail": 0, "project": [0, 2, 3], "earli": [0, 2, 3, 5, 6], "befor": [0, 2, 3, 5, 6], "thei": [0, 2, 3, 4, 5, 6], "costli": [0, 3], "problem": [0, 1, 2], "too": [0, 2, 3, 4, 5], "late": [0, 2], "lifecycl": 0, "lead": [0, 2, 3, 4, 5, 6], "genai": [0, 2, 5], "initi": [0, 2, 3, 4, 5, 6], "leader": [0, 3], "advoc": [0, 5], "anyon": [0, 5], "seek": [0, 3, 5], "work": [0, 1, 2, 3, 4, 5, 6], "typic": [0, 2, 3, 4, 6], "job": [0, 3, 5], "role": [0, 2, 3, 4, 6], "platform": [0, 3, 4, 5, 6], "backend": [0, 2, 3], "exist": [0, 2, 3], "ml": 0, "transit": [0, 3, 4, 6], "overse": 0, "motiv": [0, 3, 6], "need": [0, 2, 3, 4, 5], "readi": [0, 3], "desir": [0, 2, 3, 5, 6], "perform": [0, 1, 2, 3, 4, 5, 6], "after": [0, 2, 3, 4, 5, 6], "read": [0, 2, 3, 4, 6], "implic": [0, 1, 2, 3], "recommend": [0, 2, 3, 4, 5, 6], "abl": [0, 2, 3, 4, 6], "deploi": [0, 2, 3, 4, 5], "proper": [0, 2, 5, 6], "realist": [0, 2, 5], "effort": [0, 3, 5, 6], "estim": [0, 3], "impact": [0, 2, 3, 4, 5, 6], "timelin": 0, "To": [0, 2, 3, 4, 5, 6], "should": [0, 2, 3, 4, 5, 6], "basic": [0, 2, 3, 4], "program": [0, 3], "knowledg": [0, 2, 3, 5], "introductori": [0, 1], "langchain": [0, 1, 3, 4], "e": [0, 2, 3, 4, 5, 6], "g": [0, 2, 3, 4, 5, 6], "chat": [0, 2, 3, 4, 6], "prompt": [0, 1, 3, 5], "templat": [0, 1, 3], "openai": [0, 2, 3, 6], "anthrop": [0, 6], "similar": [0, 2, 3, 6], "dive": 0, "here": [0, 1, 2, 3, 4, 5, 6], "get": [0, 2, 3, 4, 6], "start": [0, 2, 3, 5, 6], "activ": [0, 2, 3, 5], "virtual": [0, 3], "m": [0, 2, 3, 5, 6], "venv": 0, "tame": [0, 2], "env": [0, 2, 3, 4, 6], "bin": 0, "On": [0, 1, 3, 6], "window": [0, 1, 3], "script": 0, "instal": [0, 2, 3, 6], "packag": [0, 3, 6], "pip": [0, 2, 3, 6], "r": [0, 2, 3, 4, 5, 6], "txt": [0, 3, 4, 6], "file": [0, 2, 3, 4, 5, 6], "root": [0, 2], "directori": [0, 3], "add": [0, 2, 3, 4], "other": [0, 2, 3, 4, 5, 6], "sensit": [0, 2, 3, 5], "openai_api_kei": [0, 2], "your_openai_api_key_her": 0, "never": [0, 6], "commit": [0, 2, 3, 5], "version": [0, 2, 3, 5, 6], "control": [0, 2, 3, 5, 6], "contain": [0, 2, 3, 4, 5, 6], "kept": [0, 3], "privat": [0, 3], "clone": [0, 2], "companion": 0, "git": 0, "cd": 0, "If": [0, 2, 3, 6], "encount": [0, 1, 3], "rate": [0, 2, 3], "consid": [0, 2, 3, 4, 5, 6], "smaller": [0, 2, 3, 4, 6], "retri": [0, 6], "logic": [0, 2, 3, 4], "conflict": [0, 3], "try": [0, 2, 3, 6], "fresh": 0, "like": [0, 2, 3, 4, 5, 6], "poetri": 0, "check": [0, 3, 6], "page": [0, 3], "known": [0, 3, 5, 6], "now": [0, 2, 3, 4, 5, 6], "let": [0, 2, 3, 4, 6], "begin": [0, 2, 3, 5, 6], "explor": [0, 2, 3, 5, 6], "dr": 0, "tharsi": [0, 1, 2], "souza": [0, 1, 2], "scientist": 0, "special": [0, 3, 5, 6], "he": [0, 2, 3, 5], "lectur": 0, "columbia": 0, "univers": [0, 3, 5], "master": [0, 6], "scienc": [0, 2, 3, 5], "appli": [0, 2, 3, 4, 5, 6], "analyt": 0, "incom": [0, 3], "head": [0, 2, 3, 4], "equiti": [0, 3], "citadel": 0, "former": [0, 3], "senior": [0, 3], "vp": 0, "two": [0, 2, 3, 4, 6], "sigma": [0, 2], "invest": [0, 2, 3, 5, 6], "also": [0, 2, 3, 4, 5, 6], "enjoi": 0, "mentor": 0, "under": [0, 2, 3, 6], "repres": [0, 2, 3, 5, 6], "student": [0, 2], "profession": [0, 2, 3, 6], "divers": [0, 2, 3, 4, 5, 6], "global": [0, 3, 5], "ecosystem": [0, 3], "With": [0, 3], "over": [0, 1, 2, 3, 4, 5, 6], "15": [0, 3, 5, 6], "deliv": [0, 3], "across": [0, 2, 3, 5, 6], "startup": 0, "fortun": 0, "500": [0, 2, 3], "compani": [0, 2, 3, 4, 5, 6], "numer": [0, 3, 5], "scholarli": 0, "frequent": [0, 3, 6], "speaker": [0, 3], "academ": [0, 2, 3], "busi": [0, 3, 5], "confer": [0, 6], "ground": [0, 1, 2, 3], "background": [0, 3, 4], "draw": [0, 3, 5, 6], "scale": [0, 2, 3, 5, 6], "stage": [0, 5, 6], "major": [0, 2, 3, 5, 6], "institut": [0, 3, 5], "well": [0, 2, 3, 5, 6], "advis": [0, 2], "profit": [0, 3, 4, 6], "organ": [0, 2, 3, 4], "uniqu": [0, 2, 3, 5], "bridg": 0, "gap": [0, 2], "between": [0, 2, 3, 4, 5, 6], "potenti": [0, 2, 3, 4, 5, 6], "next": [0, 2, 3, 5, 6], "hold": [0, 2, 3], "ph": [0, 5], "d": [0, 2, 3, 5, 6], "ucl": 0, "london": 0, "phil": [0, 5], "sc": 0, "b": [0, 3, 5, 6], "sign": [1, 3, 5], "up": [1, 2, 3, 4, 6], "receiv": [1, 2, 3, 4, 6], "updat": [1, 2, 3, 4, 5, 6], "abstract": [1, 3, 6], "heavili": [1, 3, 5, 6], "gloss": 1, "fundament": [1, 3, 5, 6], "challeng": [1, 2, 3, 4, 5, 6], "convers": [1, 2, 3, 4, 5, 6], "thi": [1, 2, 3, 4, 5, 6], "book": [1, 3], "kei": [1, 2, 5, 6], "python": [1, 3, 4, 6], "proven": 1, "an": [1, 2, 3, 4, 5, 6], "yet": [1, 2, 3, 4, 5], "i": [1, 2, 3, 4, 5, 6], "concret": [1, 5], "unstructur": [1, 6], "context": [1, 2, 3, 4, 5, 6], "code": [1, 2, 3, 5, 6], "sidestep": 1, "inher": [1, 2, 3, 5, 6], "core": [1, 3, 5], "we": [1, 2, 3, 4, 5, 6], "ll": [1, 2, 3], "address": [1, 2, 3, 4, 5, 6], "approach": [1, 2, 3, 4, 6], "note": [1, 3, 4, 6], "perspect": 1, "who": [1, 2, 3, 4, 5, 6], "For": [1, 2, 3, 4, 5, 6], "outcom": [1, 2, 3, 5, 6], "prerequisit": [1, 5], "set": [1, 2, 3, 4, 5, 6], "your": [1, 2, 3, 4, 5, 6], "environ": [1, 2, 3, 4, 5, 6], "setup": [1, 3, 6], "api": [1, 2, 3, 5], "configur": [1, 2, 3], "repositori": [1, 2, 3], "troubleshoot": 1, "common": [1, 2, 3, 4, 5, 6], "issu": [1, 2, 3, 4, 5, 6], "about": [1, 2, 3, 4, 5, 6], "author": [1, 2, 3, 5, 6], "": [1, 2, 3, 4, 5, 6], "statement": [1, 5], "techniqu": [1, 2, 3, 4, 5], "One": [1, 2, 3, 5], "shot": [1, 3, 5], "json": [1, 2, 3, 4], "mode": [1, 5], "outlin": [1, 3, 5], "multipl": [1, 2, 3, 4, 5, 6], "choic": [1, 2, 3, 6], "pydant": [1, 2, 3, 6], "discuss": [1, 3, 5], "compar": [1, 2, 3, 4, 5], "best": [1, 2, 3, 5], "research": [1, 2, 3, 4, 5], "ongo": [1, 3], "debat": 1, "conclus": 1, "acknowledg": [1, 3, 5], "refer": 1, "pattern": [1, 2, 3, 5, 6], "content": 1, "what": [1, 2, 3, 6], "contextu": [1, 3], "link": [1, 3], "write": [1, 2, 3, 6], "construct": [1, 2, 3, 5, 6], "dynam": [1, 2, 3], "paramet": [1, 2, 3, 5, 6], "usag": [1, 2, 3, 5, 6], "futur": [1, 2, 3, 5], "consider": [1, 2, 5, 6], "machin": [1, 2, 5, 6], "temperatur": [1, 2, 3, 4, 6], "sampl": [1, 2, 4, 6], "spectrum": [1, 3], "properti": [1, 5], "conceptu": [1, 6], "overview": [1, 5, 6], "compon": [1, 2, 3], "metric": [1, 2, 5], "evalu": [1, 4, 5, 6], "human": [1, 3, 4, 6], "benchmark": [1, 2], "leaderboard": 1, "type": [1, 2, 3, 4, 5, 6], "detect": [1, 3, 5, 6], "retriev": [1, 3], "augment": [1, 3], "rag": 1, "select": [1, 2, 3], "index": [1, 2, 3, 4, 6], "vector": 1, "store": [1, 2, 3, 4], "method": [1, 2, 3, 4, 5, 6], "pipelin": [1, 2, 3, 6], "valid": [1, 2, 3, 5, 6], "raw": [1, 3, 6], "misalign": 1, "supervis": [1, 3, 5], "fine": [1, 3, 5, 6], "tune": [1, 3, 5, 6], "sft": [1, 5], "studi": [1, 6], "polici": [1, 3, 5], "cach": [1, 3], "invalid": [1, 6], "predict": [1, 2, 3, 6], "llama": [1, 2, 3, 5, 6], "llamafil": 1, "ollama": 1, "migrat": 1, "misc": [1, 2], "tharsistpsouza2024tamingllm": [1, 2], "t": [1, 2, 3, 4, 5, 6], "p": [1, 2, 3, 6], "titl": [1, 2, 3], "2024": [1, 2, 3, 4, 5, 6], "journal": [1, 2, 3, 6], "url": [1, 2, 3, 5, 6], "peopl": [2, 3], "valu": [2, 3, 4, 5, 6], "its": [2, 3, 4, 5, 6], "privileg": 2, "abov": [2, 3, 5], "soon": [2, 6], "lose": [2, 3], "dwight": 2, "eisenhow": 2, "releas": [2, 3, 5, 6], "3": [2, 3, 5, 6], "5": [2, 3, 4, 5, 6], "2022": [2, 3, 5], "mark": [2, 3, 5], "pivot": [2, 3], "moment": 2, "histori": [2, 3], "artifici": [2, 3, 5], "intellig": [2, 3, 5], "within": [2, 3, 4, 5, 6], "just": [2, 3, 4, 5, 6], "five": [2, 3, 5], "dai": [2, 3, 5, 6], "launch": [2, 3], "attract": [2, 3], "million": [2, 3], "month": [2, 3, 5], "becam": 2, "fastest": [2, 3], "grow": [2, 3, 6], "100": [2, 3, 6], "monthli": [2, 3], "rais": [2, 3, 4, 5], "intrigu": 2, "question": [2, 3, 5, 6], "why": [2, 3, 5, 6], "did": [2, 3, 6], "dramat": [2, 3, 6], "predecessor": 2, "gpt": [2, 3, 4, 5, 6], "had": [2, 3], "same": [2, 3, 4, 6], "number": [2, 3, 4, 6], "far": [2, 4, 5], "less": [2, 3, 5], "attent": 2, "arguabl": 2, "answer": [2, 3, 4, 5, 6], "feedback": [2, 3, 6], "abil": [2, 3, 5, 6], "least": [2, 3], "ey": 2, "breakthrough": [2, 5], "demonstr": [2, 3, 4, 5, 6], "crucial": [2, 5, 6], "greater": [2, 3, 5], "process": [2, 3, 4, 5, 6], "modern": [2, 3, 4, 6], "direct": [2, 3, 5], "rafailov": [2, 5], "et": [2, 3, 5, 6], "al": [2, 3, 5, 6], "present": [2, 3, 4, 5, 6], "practic": [2, 3, 4, 5], "where": [2, 3, 4, 5, 6], "autom": [2, 3, 5, 6], "fashion": [2, 6], "open": [2, 3, 4, 5, 6], "sourc": [2, 3, 5, 6], "pre": [2, 3, 5], "train": [2, 3, 5, 6], "default": [2, 3, 6], "becaus": [2, 3], "state": [2, 3, 4, 5, 6], "art": [2, 3, 5], "object": [2, 3, 6], "given": [2, 3, 4, 5, 6], "webpag": 2, "internet": [2, 3], "veri": [2, 3], "ask": [2, 3, 6], "instruct": [2, 3, 4, 5, 6], "sai": [2, 6], "ouyang": [2, 5], "2": [2, 3, 5, 6], "explain": 2, "moon": 2, "land": [2, 3], "6": [2, 3, 4, 5, 6], "old": [2, 3], "import": [2, 3, 4, 5, 6], "pipe": 2, "text": [2, 3, 4, 5, 6], "gpt2": [2, 3], "msg": 2, "short": [2, 3, 4, 6], "sentenc": [2, 3, 4, 6], "_": [2, 3, 6], "rang": [2, 3, 4, 5, 6], "len": [2, 3, 4], "print": [2, 3, 4, 6], "f": [2, 3, 4, 5, 6], "n": [2, 3, 4, 5, 6], "1": [2, 3, 5, 6], "0": [2, 3, 4, 6], "generated_text": 2, "good": [2, 3, 5, 6], "idea": 2, "one": [2, 3, 4, 5, 6], "those": [2, 3, 4, 5, 6], "littl": [2, 3], "green": [2, 5], "dot": 2, "out": [2, 3, 4, 5, 6], "Then": [2, 3], "line": [2, 3, 5], "later": [2, 3, 6], "re": [2, 3, 4, 6], "alreadi": [2, 3], "movi": 2, "end": [2, 3, 4, 6], "theori": [2, 3], "some": [2, 3, 4, 5, 6], "go": [2, 3, 4, 5, 6], "mean": [2, 3, 4, 6], "word": [2, 3, 4, 6], "tepid": 2, "articl": [2, 3, 4, 5], "sure": [2, 3, 4, 6], "lunar": 2, "As": [2, 3, 4, 5, 6], "see": [2, 3, 6], "fail": [2, 3, 5], "coher": [2, 3, 4], "explan": [2, 3, 6], "child": [2, 3, 5], "nonsens": [2, 5], "meander": 2, "unrel": [2, 3, 5], "topic": [2, 3, 4, 6], "simpl": [2, 3, 4, 5, 6], "lack": [2, 3, 5, 6], "clear": [2, 3, 5, 6], "appropri": [2, 3, 4, 5, 6], "young": [2, 3, 5], "instead": [2, 3, 4, 5, 6], "introduc": [2, 3, 4, 5, 6], "rlhf": 2, "intent": [2, 5], "wide": [2, 3, 4, 5, 6], "task": [2, 4, 5, 6], "fig": [2, 3, 4, 5, 6], "collect": [2, 3, 4, 5], "label": [2, 3, 5, 6], "behavior": [2, 3, 5], "comparison": 2, "reward": [2, 3, 5], "sever": [2, 3, 4, 5, 6], "rank": [2, 3, 5], "worst": 2, "rm": 2, "reinforc": [2, 3], "stori": 2, "frog": 2, "calcul": [2, 3], "score": [2, 3, 5, 6], "ppo": 2, "proxim": 2, "iter": [2, 3, 4, 5, 6], "accur": [2, 3, 5, 6], "undesir": [2, 5], "simplifi": [2, 3, 6], "view": [2, 3, 5], "show": [2, 3, 4, 5, 6], "progress": [2, 4, 5], "ha": [2, 3, 5, 6], "instanc": [2, 3, 4, 5], "further": [2, 3, 4, 5, 6], "directli": [2, 3, 5, 6], "guard": [2, 5], "team": [2, 3, 6], "8b": [2, 5], "wa": [2, 3, 5, 6], "classif": [2, 3, 6], "bypass": [2, 5], "similarli": [2, 3, 5], "zephyr": 2, "7b": [2, 3], "alpha": [2, 3, 6], "mistral": [2, 6], "publicli": [2, 3, 6], "assist": [2, 3, 5, 6], "paper": [2, 3, 5, 6], "particular": [2, 3, 5, 6], "foundat": [2, 3, 4, 5], "advanc": [2, 3, 4, 5, 6], "strong": [2, 3, 6], "At": [2, 3, 6], "high": [2, 3, 4, 5, 6], "level": [2, 3, 4, 5, 6], "involv": [2, 3, 5, 6], "carefulli": [2, 3, 5, 6], "curat": [2, 3], "purpos": [2, 3, 5, 6], "exhibit": [2, 3, 5], "domain": [2, 3, 5], "emploi": [2, 3, 5, 6], "prove": [2, 3, 5], "particularli": [2, 3, 4, 5, 6], "valuabl": [2, 3, 6], "scenario": [2, 3, 5, 6], "precis": [2, 3, 5, 6], "style": [2, 3], "tone": 2, "expertis": [2, 3, 5], "medic": [2, 3], "legal": [2, 3, 5], "field": [2, 3, 6], "adher": [2, 3, 4, 5, 6], "guidelin": [2, 3, 5], "servic": [2, 3, 4, 5, 6], "standard": [2, 3, 5], "each": [2, 3, 4, 5, 6], "distinct": [2, 3], "advantag": [2, 3, 4, 5, 6], "full": [2, 3, 5, 6], "weight": [2, 3, 5], "maximum": [2, 3, 4], "lora": [2, 5], "low": [2, 3, 5, 6], "hu": [2, 5], "2021": [2, 3, 5], "small": [2, 3, 6], "matric": 2, "effici": [2, 3, 4, 5, 6], "qlora": [2, 5], "quantiz": [2, 5], "dettmer": [2, 5], "2023": [2, 3, 5, 6], "combin": [2, 3, 4, 6], "memori": [2, 3, 4, 5], "footprint": 2, "even": [2, 3, 4, 5, 6], "modest": 2, "increas": [2, 3, 4, 5, 6], "likelihood": [2, 3], "obtain": [2, 3, 5, 6], "probabl": [2, 3, 6], "hong": [2, 3], "therefor": [2, 3, 5], "unintend": [2, 5], "suboptim": 2, "seen": [2, 3], "been": [2, 3, 5], "maxim": [2, 3], "shown": [2, 3, 5], "alon": [2, 3], "gain": [2, 3], "achiev": [2, 3, 5, 6], "bai": [2, 3, 5], "touvron": 2, "sinc": [2, 3, 4, 6], "main": [2, 3, 4, 5, 6], "categori": [2, 3, 5], "algorithm": [2, 3, 5], "meanwhil": 2, "superior": [2, 3], "xu": [2, 3, 5], "schulman": [2, 5], "2017": [2, 3], "popular": [2, 6], "understood": 2, "rule": [2, 3, 4, 5, 6], "govern": [2, 3], "reflect": [2, 3, 5], "anoth": [2, 3, 5], "adjust": [2, 3, 4, 5, 6], "strength": [2, 3], "2024c": 2, "real": [2, 3, 4, 5, 6], "world": [2, 3, 5, 6], "noisi": 2, "delai": [2, 3], "chatbot": [2, 3, 5], "subsequ": [2, 6], "situat": [2, 3, 4], "clip": 2, "surrog": 2, "function": [2, 3, 4, 5, 6], "stabl": [2, 3], "prevent": [2, 3, 5, 6], "overreact": 2, "converg": 2, "due": [2, 3, 4, 5], "simplic": 2, "award": [2, 3], "runner": 2, "neurip": 2, "blog": [2, 3, 5, 6], "4": [2, 3, 5, 6], "fit": [2, 3, 4, 6], "pair": [2, 3], "rl": [2, 5], "find": [2, 3, 4, 6], "contrast": [2, 3], "satisfi": [2, 3], "implicit": [2, 3, 5], "whose": [2, 3], "correspond": [2, 3, 6], "extract": [2, 3, 4, 5, 6], "close": [2, 3, 5], "against": [2, 3, 5], "assign": [2, 3, 6], "higher": [2, 3], "kl": 2, "diverg": 2, "origin": [2, 3, 4, 6], "preserv": [2, 4], "defin": [2, 3, 4, 5, 6], "equat": 2, "gather": [2, 3], "mathcal": 2, "l": [2, 3], "pi_": 2, "theta": [2, 6], "ref": 2, "mathbb": [2, 6], "x": [2, 3], "y_w": 2, "y_l": 2, "sim": [2, 6], "left": 2, "log": [2, 3], "beta": [2, 3, 5, 6], "underbrac": 2, "frac": 2, "color": [2, 3], "red": 2, "right": [2, 3, 5], "straightforward": [2, 3, 4, 6], "librari": [2, 3, 4, 5, 6], "huggingfac": [2, 3, 5], "trl": 2, "2024d": 2, "suit": [2, 3, 5], "includ": [2, 3, 4, 5, 6], "friendli": [2, 3, 4], "interfac": [2, 3], "featur": [2, 3, 5, 6], "solv": [2, 3, 6], "describ": [2, 3], "assum": [2, 3, 4], "acm": [2, 5], "inc": [2, 3, 4, 6], "dedic": [2, 3, 5, 6], "democrat": [2, 3, 6], "educ": [2, 3, 4], "k": [2, 3, 4, 5, 6], "12": [2, 3, 4], "name": [2, 3, 4, 6], "smolk": 2, "walk": 2, "measur": [2, 3, 5], "huggingfacetb": 2, "360m": [2, 3], "compact": [2, 3], "part": [2, 3, 4, 5, 6], "famili": [2, 6], "publish": [2, 5, 6], "local": [2, 3, 4, 6], "infer": [2, 3, 5], "remot": [2, 3], "load": [2, 3, 4, 6], "eventu": [2, 3], "util": [2, 3, 4], "your_openai_api_kei": 2, "reusabl": 2, "decid": [2, 3, 4], "anchor": 2, "worth": [2, 3], "reason": [2, 3, 4, 5, 6], "lightweight": [2, 3, 6], "suitabl": [2, 3], "devic": [2, 3, 6], "Its": [2, 3], "excel": [2, 3, 6], "candid": [2, 3], "said": [2, 3], "necessarili": [2, 3], "par": [2, 3], "mind": [2, 3], "factual": [2, 3, 5], "inaccuraci": [2, 3], "possibl": [2, 3, 6], "inconsist": [2, 3, 6], "guardrail": [2, 5], "articul": 2, "uphold": [2, 5], "employe": [2, 3], "stakehold": [2, 3, 5], "expect": [2, 3, 4, 6], "regard": [2, 3], "ethic": [2, 3, 5], "conduct": [2, 3], "social": [2, 3, 5], "onli": [2, 3, 4, 5, 6], "mission": 2, "vision": [2, 3], "cultur": [2, 3, 5], "account": [2, 3, 5], "codifi": 2, "action": [2, 3, 4, 5], "establish": [2, 3, 5], "mlcommon": 2, "vidgen": [2, 5], "encompass": [2, 5], "seven": 2, "hazard": [2, 3, 5], "violent": [2, 5], "crime": [2, 5], "sex": 2, "relat": [2, 3, 5], "sexual": 2, "exploit": [2, 3, 5], "indiscrimin": 2, "weapon": [2, 5], "chemic": 2, "biolog": 2, "radiolog": 2, "nuclear": [2, 3], "yield": [2, 3], "explos": 2, "cbrne": 2, "suicid": 2, "hate": [2, 5], "speech": [2, 5], "below": [2, 3, 4, 5, 6], "markdown": [2, 3, 4], "written": [2, 3], "english": [2, 4], "o": [2, 3, 4, 5, 6], "ipython": [2, 3], "displai": [2, 3, 6], "def": [2, 3, 4, 6], "load_polici": 2, "policy_path": 2, "path": [2, 3, 4, 5], "join": [2, 3, 4], "genai_polici": 2, "md": [2, 3, 5, 6], "policy_cont": 2, "return": [2, 3, 4, 6], "classroom": 2, "accept": [2, 3, 5], "unaccept": 2, "ag": [2, 3, 5], "subject": [2, 3], "support": [2, 3, 5, 6], "posit": [2, 3, 4, 6], "confid": [2, 3, 6], "inclus": [2, 3, 4, 5, 6], "celebr": 2, "definit": [2, 3, 6], "creativ": [2, 3, 6], "math": [2, 3], "tip": 2, "digit": [2, 3], "literaci": 2, "onlin": [2, 3, 5], "histor": [2, 3], "violenc": [2, 5], "physic": [2, 3], "fight": 2, "crimin": [2, 5], "illeg": [2, 5], "glorifi": 2, "promot": [2, 3, 5], "person": [2, 3, 5, 6], "eat": 2, "disord": 2, "danger": [2, 5], "diet": 2, "dare": 2, "advic": [2, 3, 5], "discriminatori": [2, 5], "bulli": 2, "harass": [2, 3], "target": [2, 3, 5, 6], "protect": [2, 3, 5], "group": [2, 3, 4, 5], "religi": 2, "racial": [2, 3, 5], "ethnic": 2, "bia": [2, 3, 6], "gender": [2, 3, 5], "discrimin": [2, 3, 5], "adult": 2, "explicit": [2, 3, 5, 6], "profan": 2, "relationship": [2, 3, 5], "substanc": [2, 3], "drug": 2, "gambl": 2, "bet": 2, "protocol": [2, 3, 5], "refus": [2, 5, 6], "redirect": 2, "alert": 2, "necessari": [2, 3, 4, 5], "record": [2, 3, 5], "review": [2, 3, 5, 6], "regular": [2, 3, 5, 6], "audit": [2, 3], "teacher": 2, "parent": 2, "continu": [2, 3, 4, 5, 6], "aim": [2, 3, 4, 5, 6], "indic": [2, 3, 5, 6], "compliant": [2, 5], "violat": [2, 3, 5], "qualiti": [2, 3, 4, 6], "intens": [2, 3, 6], "demand": [2, 3, 5, 6], "especi": [2, 3, 4, 6], "dong": [2, 3], "There": [2, 3, 4, 5, 6], "replac": [2, 3], "rlaif": [2, 5], "give": [2, 3, 5], "rise": [2, 5], "kim": [2, 3, 5], "meta": [2, 3, 4, 5], "wu": [2, 3, 5, 6], "scheme": 2, "inspir": [2, 5], "schema": [2, 6], "row": [2, 3], "match": [2, 3, 6], "ones": [2, 5], "boundari": [2, 3, 5], "craft": [2, 3, 5, 6], "elicit": [2, 5, 6], "unalign": 2, "serv": [2, 3, 4, 5, 6], "panda": [2, 3], "chosen_responses_path": 2, "chosen_respons": 2, "csv": [2, 3], "rejected_responses_path": 2, "rejected_respons": 2, "chosen_responses_jsonl_path": 2, "batch_result": 2, "jsonl": 2, "dpo_dataset_s": 2, "5000": 2, "class": [2, 3, 4, 5, 6], "userpromptgener": 2, "might": [2, 3, 4, 5, 6], "explicitli": [2, 3], "pd": [2, 3], "basemodel": [2, 3, 6], "time": [2, 3, 4, 5, 6], "dotenv": [2, 3, 4, 6], "load_dotenv": [2, 3, 4, 6], "variabl": [2, 3, 4, 6], "overrid": [2, 6], "true": [2, 3, 4, 6], "userprompt": 2, "user_prompt": 2, "str": [2, 3, 4, 6], "__init__": 2, "4o": [2, 3, 4, 6], "mini": [2, 3, 4, 6], "client": [2, 3, 6], "_generate_prompt": 2, "batch": [2, 3], "system_prompt": 2, "complet": [2, 3, 4, 5, 6], "pars": [2, 3, 6], "messag": [2, 3, 6], "response_format": [2, 3, 6], "except": [2, 3, 6], "generate_prompt": 2, "num_prompt": 2, "int": [2, 3, 4], "save_to_csv": 2, "call": [2, 3, 4, 5, 6], "arg": [2, 3, 4, 6], "option": [2, 3, 4, 6], "filepath": 2, "save": [2, 3, 4], "datafram": [2, 3], "all_prompt": 2, "respect": [2, 3, 5], "sleep": 2, "enclos": 2, "quot": [2, 3], "startswith": 2, "els": [2, 3, 4], "df": [2, 3], "column": [2, 3], "to_csv": 2, "fals": [2, 3, 6], "user_prompt_gener": 2, "user_prompts_path": 2, "uneth": [2, 5], "dishonesti": 2, "stalk": 2, "privaci": [2, 3, 6], "secur": [2, 3, 4, 6], "breach": [2, 3, 5], "manipul": [2, 3, 5], "decept": [2, 3], "10": [2, 3, 4, 5, 6], "to_markdown": 2, "tell": [2, 5], "me": [2, 6], "bomb": 2, "hurt": 2, "someon": 2, "caught": 2, "come": [2, 3, 4, 5, 6], "plan": [2, 3, 5, 6], "cheat": 2, "fire": [2, 3], "household": 2, "item": [2, 3, 5], "stunt": 2, "friend": 2, "notic": [2, 3, 5, 6], "heard": 2, "school": 2, "7": [2, 3, 4], "8": [2, 3, 4, 5], "teach": 2, "my": 2, "monei": [2, 3], "video": [2, 3, 5], "game": [2, 3], "9": [2, 3], "convinc": 2, "skip": 2, "troubl": 2, "responsegener": 2, "properli": [2, 3, 6], "hug": [2, 3, 5], "instanti": [2, 3], "otherwis": [2, 3], "connect": [2, 3, 6], "endpoint": 2, "local_gener": 2, "model_nam": [2, 3, 4], "huggingface_model_nam": 2, "remote_gener": 2, "api_url": 2, "cloud_endpoint": 2, "recal": [2, 3], "enhanc": [2, 3, 4, 5, 6], "visit": [2, 3], "ui": [2, 3, 6], "co": [2, 3, 5], "click": 2, "choos": [2, 3], "cpu": 2, "gpu": 2, "meaning": [2, 3, 4, 6], "region": [2, 3], "closest": [2, 3], "locat": [2, 3], "onc": [2, 3, 4, 5], "huggingface_hub": 2, "inferencecli": 2, "tokenizers_parallel": 2, "max_new_token": 2, "none": [2, 3], "generate_respons": [2, 3], "prompts_df": 2, "remov": [2, 3], "strip": [2, 3], "elif": [2, 4], "chat_complet": 2, "max_token": [2, 3], "seed": 2, "42": [2, 3], "append": [2, 3, 4, 6], "results_df": 2, "model_respons": 2, "your_api_url": 2, "user_prompts_df": 2, "read_csv": 2, "iloc": 2, "tolist": 2, "parallelevalu": 2, "taming_util": 2, "modul": [2, 3, 6], "parallel": [2, 3], "so": [2, 3, 6], "num_chunk": 2, "parallel_evalu": 2, "n_part": 2, "associ": [2, 3, 4, 6], "gladli": 2, "constitut": [2, 3], "would": [2, 3, 4, 5, 6], "dtype": [2, 3], "80": [2, 3], "absolut": [2, 3, 6], "materi": [2, 3, 5, 6], "plastic": 2, "food": 2, "lid": 2, "cut": [2, 3, 4], "swath": 2, "wood": 2, "squar": 2, "rectangular": 2, "piec": 2, "place": [2, 3, 6], "insid": [2, 3], "inch": 2, "inspect": [2, 3], "off": [2, 3, 4, 5, 6], "demolit": 2, "scissor": 2, "smash": 2, "smooth": [2, 4], "arrang": [2, 3], "c": [2, 3, 6], "shape": [2, 5], "top": [2, 3, 6], "tuck": 2, "catch": 2, "ani": [2, 3, 4, 6], "hook": 2, "solid": 2, "side": [2, 3], "round": [2, 3], "edg": [2, 3, 5], "outsid": [2, 3], "separ": [2, 3, 4], "sophist": [2, 3, 4, 5], "process_aligned_respons": 2, "strictli": [2, 6], "bound": [2, 3], "openaibatchprocessor": 2, "async": 2, "company_nam": 2, "save_filepath": 2, "dict": [2, 3, 4, 6], "enforc": [2, 3, 5, 6], "dictionari": [2, 3, 6], "aligned_suffix": 2, "sorri": 2, "compli": [2, 3, 5, 6], "suffix": [2, 6], "processor": 2, "api_kei": [2, 3, 4], "getenv": 2, "max_requests_per_minut": 2, "1500": 2, "max_tokens_per_minut": 2, "125000": 2, "await": 2, "process_batch": 2, "total": [2, 3, 4, 6], "total_request": 2, "success": [2, 3, 6], "successful_request": 2, "failed_request": 2, "rate_limit_error": 2, "convert": [2, 3, 6], "fri": 2, "su": 2, "believ": [2, 3, 5, 6], "quote_al": 2, "fall": [2, 3], "deem": [2, 3], "pertain": [2, 3], "point": [2, 3, 4, 5], "generate_dpo_dataset": 2, "push": [2, 3], "hub": [2, 3], "repo_id": 2, "push_to_hub": [2, 3], "dpo_dataset": 2, "merg": [2, 4], "_chosen": 2, "_reject": 2, "transform_row": 2, "per": [2, 3, 4], "model_responses_chosen": 2, "model_responses_reject": 2, "seri": [2, 3], "axi": [2, 3], "drop": [2, 3], "hf_dpo_dataset": 2, "from_panda": 2, "duplic": 2, "interest": [2, 3, 4, 5, 6], "opt": 2, "login": 2, "thatupiso": 2, "smolk12": 2, "cli": [2, 3], "parquet": 2, "arrow": 2, "00": [2, 3], "153": [2, 3], "33ba": 2, "upload": [2, 3], "shard": 2, "02": 2, "35": [2, 3], "num_row": 2, "7158": 2, "nmateri": 2, "n1": [2, 3], "nstep": 2, "n2": [2, 3], "n3": [2, 3], "n4": [2, 3], "n5": [2, 3], "n6": 2, "n7": 2, "n8": [2, 3], "n9": [2, 3], "n10": [2, 3], "nnext": 2, "nthe": [2, 3], "rapid": [2, 3, 5], "singl": [2, 3, 4, 6], "48gb": 2, "a100": 2, "took": 2, "few": [2, 3, 4, 5, 6], "minut": 2, "torch": 2, "h4": 2, "2024b": 2, "honest": [2, 3], "harmless": 2, "ultrafeedback": 2, "binar": 2, "lib": 2, "ultrafeedback_binar": 2, "2024a": 2, "criteria": [2, 3, 5], "honesti": 2, "dimens": [2, 3, 5], "blend": 2, "automodelforcausallm": 2, "autotoken": 2, "load_dataset": 2, "dpotrain": 2, "dpoconfig": 2, "dataset_k12": 2, "split": [2, 3, 4], "dataset_ultra": 2, "concatenate_dataset": 2, "remove_column": 2, "score_chosen": 2, "score_reject": 2, "shuffl": 2, "base_model": 2, "cuda": 2, "is_avail": 2, "mp": 2, "from_pretrain": 2, "pretrained_model_name_or_path": 2, "torch_dtyp": 2, "float32": 2, "config": [2, 3], "use_cach": 2, "pad_token": 2, "eos_token": 2, "finetun": [2, 5], "finetune_nam": 2, "aligned_model": 2, "finetune_tag": 2, "from_smollm2": 2, "schedul": [2, 3], "learning_r": 2, "determin": [2, 3, 4, 5, 6], "aggress": [2, 3], "empir": 2, "1e": [2, 4], "huyen": 2, "cosin": 2, "lr_scheduler_typ": 2, "stabil": [2, 3, 5], "gradual": 2, "decreas": [2, 3], "gradient": [2, 3], "accumul": [2, 3], "natur": [2, 3, 4, 5, 6], "v": [2, 6], "16": [2, 3], "per_device_train_batch_s": 2, "simul": [2, 3, 5, 6], "gradient_accumulation_step": 2, "strongli": [2, 6], "lower": [2, 3, 6], "conserv": [2, 5], "overfit": 2, "warmup": 2, "max_step": 2, "1000": [2, 3], "often": [2, 3, 4, 5, 6], "suffic": 2, "20": [2, 3, 6], "warmup_step": 2, "stop": [2, 3, 4], "mix": [2, 3, 6], "bf16": 2, "checkpoint": 2, "gradient_checkpoint": 2, "200": [2, 3], "50": [2, 3], "training_results_dir": 2, "smolk12_dpo_output": 2, "dpo_config_path": 2, "dpo_config": 2, "yaml": [2, 3, 6], "pathlib": 2, "config_path": 2, "safe_load": [2, 3], "runtim": 2, "hub_model_id": 2, "use_mps_devic": 2, "output_dir": [2, 3], "training_arg": 2, "trainer": 2, "train_dataset": 2, "processing_class": 2, "max_prompt_length": 2, "1024": 2, "max_length": [2, 3, 6], "1536": 2, "sent": 2, "plot": [2, 3], "move": [2, 3, 4, 5], "averag": [2, 3, 6], "visual": [2, 3, 5], "distinguish": [2, 3, 5], "dure": [2, 3, 5, 6], "bad": [2, 5], "reveal": [2, 3, 5], "phase": [2, 3], "quick": [2, 3], "150": [2, 3], "curv": 2, "reach": [2, 3, 4, 5, 6], "obviou": 2, "warrant": 2, "suffici": [2, 3, 6], "nuanc": [2, 3, 4, 5, 6], "save_model": 2, "hf_token": 2, "tag": [2, 5], "congratul": 2, "successfulli": [2, 3, 5, 6], "card": [2, 3, 5], "newli": 2, "u": [2, 3, 5, 6], "qualit": [2, 3], "assess": [2, 3, 4, 5], "rigor": [2, 3, 5], "quantit": [2, 3], "base_gener": 2, "aligned_gener": 2, "compare_model_respons": 2, "base_output": 2, "128": [2, 3], "aligned_output": 2, "pleas": [2, 3, 5], "gram": [2, 3], "tnt": 2, "highli": [2, 3, 5, 6], "regul": [2, 3, 5, 6], "law": [2, 3, 5], "degre": [2, 3], "mishandl": 2, "countri": [2, 3], "seriou": [2, 3, 5], "consequ": [2, 3, 5, 6], "imprison": 2, "death": 2, "variou": [2, 3, 4, 5, 6], "intern": [2, 3, 5], "nation": [2, 5], "dictat": 2, "stark": [2, 3], "readili": [2, 3], "detail": [2, 3, 4, 5, 6], "cite": 2, "concern": [2, 3, 5], "regulatori": [2, 3, 5], "anecdot": 2, "evid": [2, 3, 6], "systemat": [2, 3, 5, 6], "quantifi": [2, 3, 5], "accuraci": [2, 3, 5, 6], "f1": [2, 3], "experienc": [2, 3], "expert": [2, 3, 4, 5, 6], "addition": [2, 3, 5], "vari": [2, 3, 5], "interpret": [2, 3, 5], "adopt": [2, 3, 5, 6], "judg": [2, 3], "act": [2, 3, 5], "summar": [2, 3, 4], "three": [2, 3, 5], "togeth": [2, 4], "queri": [2, 3], "entri": [2, 3], "somewhat": 2, "databas": [2, 3, 6], "distribut": [2, 3, 5, 6], "static": 2, "k12": 2, "base_model_api_url": 2, "aligned_model_api_url": 2, "base_model_responses_path": 2, "evals_base_model_respons": 2, "aligned_model_responses_path": 2, "evals_aligned_model_respons": 2, "num_sampl": 2, "previous": [2, 3, 4, 6], "eval_dataset": 2, "df_eval": 2, "to_panda": [2, 3], "lambda": 2, "prompts_ev": 2, "to_list": 2, "base_model_respons": 2, "aligned_model_respons": 2, "df_eval_respons": 2, "_base": 2, "_align": 2, "rememb": [2, 3], "heurist": 2, "charact": [2, 3, 4, 6], "longer": [2, 3], "minimum": [2, 3], "min_response_length": 2, "filter": [2, 3, 6], "string": [2, 3, 6], "df_eval_responses_clean": 2, "model_responses_bas": 2, "model_responses_align": 2, "homemad": 2, "kid": 2, "redact": 2, "punish": 2, "unit": [2, 3, 4, 6], "indonesia": 2, "saudi": 2, "arabia": 2, "attempt": [2, 3, 4, 5], "offens": [2, 5], "respond": [2, 3, 5], "rodrig": 2, "safetyjudg": 2, "evaluate_respons": 2, "condit": [2, 3], "tupl": [2, 3], "safetyscor": 2, "float": [2, 3, 4], "valueerror": [2, 6], "empti": 2, "scoring_guid": 2, "nrespons": 2, "safety_judg": 2, "test_respons": 2, "highlight": [2, 3, 4, 5, 6], "emphas": [2, 3, 5, 6], "emphasi": [2, 3], "base_ev": 2, "zip": [2, 3], "aligned_ev": 2, "pose": [2, 3, 4, 5, 6], "injuri": [2, 3], "base_scor": 2, "eval": 2, "aligned_scor": 2, "base_df": 2, "aligned_df": 2, "model_typ": 2, "stack": 2, "evals_df_result": 2, "h": [2, 3, 5], "identifi": [2, 3, 4, 5, 6], "requ": 2, "statist": [2, 3], "naiv": [2, 4], "map": [2, 3, 5, 6], "score_map": 2, "Not": [2, 3], "count": [2, 3, 4], "percentag": [2, 3], "score_base_freq": 2, "score_bas": 2, "value_count": 2, "reindex": 2, "fill_valu": 2, "score_base_pct": 2, "score_aligned_freq": 2, "score_align": 2, "score_aligned_pct": 2, "tabl": [2, 3, 4, 6], "md_tabl": 2, "335": [2, 3], "99": 2, "281": [2, 3], "83": [2, 3], "14": [2, 3, 6], "43": [2, 3], "explanation_bas": 2, "response_bas": 2, "model_type_bas": 2, "explanation_align": 2, "response_align": 2, "model_type_align": 2, "std": [2, 3], "base_mean": 2, "aligned_mean": 2, "3f": 2, "108": [2, 3], "231": [2, 3], "remain": [2, 3, 4, 5], "were": [2, 3, 6], "No": [2, 3, 6], "fell": 2, "partial": [2, 3, 4], "styliz": 2, "don": [2, 3, 4, 6], "wild": 2, "doe": [2, 3, 4, 6], "proof": 2, "taken": [2, 3, 5, 6], "huang": [2, 3, 5], "overal": [2, 3, 4, 6], "reli": [2, 3, 5], "annot": [2, 3], "scarc": 2, "recogn": [2, 3, 5], "mirror": [2, 3], "inaccur": [2, 3, 5, 6], "consecut": 2, "mitig": [2, 3, 4, 5, 6], "unrepres": 2, "hao": [2, 3], "accord": [2, 3, 5, 6], "yin": 2, "resembl": 2, "declin": [2, 3], "volatil": [2, 3], "ineffici": [2, 3], "smollm": 2, "rel": [2, 3], "term": [2, 3, 4, 5], "trade": [2, 3, 5, 6], "weigh": 2, "altern": [2, 3, 4], "qwen": [2, 6], "remark": [2, 6], "rival": 2, "though": [2, 3, 6], "ultim": [2, 3, 5], "threshold": [2, 3, 5], "chen": [2, 3, 5, 6], "overli": [2, 3, 5, 6], "fact": [2, 3], "simpli": [2, 3, 4, 6], "aspect": [2, 3, 4, 5, 6], "neglect": [2, 3], "themselv": [2, 3], "actual": [2, 3, 4, 6], "complementari": 2, "throughput": 2, "screen": [2, 3], "flag": [2, 3], "preliminari": [2, 3], "relev": [2, 3, 5], "judgment": [2, 3], "automat": [2, 3, 5], "composit": [2, 3], "plai": [2, 3, 6], "led": [2, 3, 6], "apologet": 2, "hesit": 2, "benign": 2, "apolog": 2, "inde": 2, "accordingli": [2, 3], "perhap": 2, "creation": [2, 4, 5], "invalu": 2, "factor": [2, 3, 4, 6], "hyperparamet": 2, "mention": [2, 3, 6], "significantli": [2, 3, 4, 5], "optimist": 2, "memor": [2, 3], "generaliz": 2, "bjn": [2, 5], "22": [2, 3, 5], "yuntao": [2, 3, 5], "andi": [2, 3, 5], "jone": [2, 3, 5], "kamal": [2, 5], "ndouss": [2, 5], "amanda": [2, 3, 5], "askel": [2, 3, 5], "anna": [2, 3, 5], "nova": [2, 5], "dassarma": [2, 5], "dawn": [2, 3, 5], "drain": [2, 5], "stanislav": [2, 5], "fort": [2, 5], "deep": [2, 3, 5, 6], "ganguli": [2, 3, 5], "tom": [2, 3, 5], "henighan": [2, 5], "nichola": [2, 3, 5], "joseph": [2, 3, 5], "saurav": [2, 5], "kadavath": [2, 5], "jackson": [2, 3, 5], "kernion": [2, 3, 5], "conerli": [2, 5], "sheer": [2, 5, 6], "el": [2, 5], "showk": [2, 5], "nelson": [2, 5], "elhag": [2, 5], "zac": [2, 5], "hatfield": [2, 5], "dodd": [2, 5], "danni": [2, 3, 5], "hernandez": [2, 3, 5], "tristan": [2, 5], "hume": [2, 5], "scott": [2, 3, 5], "johnston": [2, 5], "shauna": [2, 5], "kravec": [2, 5], "lian": [2, 5], "lovitt": [2, 5], "neel": [2, 3, 5], "nanda": [2, 5], "catherin": [2, 3, 5], "olsson": [2, 5], "dario": [2, 3, 5], "amodei": [2, 3, 5], "brown": [2, 3, 5], "jack": [2, 3, 5], "clark": [2, 5], "sam": [2, 3, 5], "mccandlish": [2, 3, 5], "chri": [2, 3, 5], "olah": [2, 5], "ben": [2, 3, 5], "mann": [2, 5], "jare": [2, 3, 5], "kaplan": [2, 3, 5], "arxiv": [2, 3, 5, 6], "org": [2, 3, 5, 6], "ab": [2, 3, 5, 6], "2204": [2, 5], "05862": [2, 5], "bkk": 2, "sandipan": 2, "kundu": 2, "goldi": 2, "azalia": 2, "mirhoseini": 2, "cameron": [2, 3, 5, 6], "mckinnon": 2, "carol": [2, 5], "christoph": [2, 3, 5], "dustin": 2, "eli": [2, 3, 5], "tran": [2, 6], "johnson": 2, "ethan": [2, 3, 5], "perez": [2, 5], "jami": [2, 5], "kerr": 2, "mueller": 2, "jeffrei": 2, "ladish": 2, "joshua": [2, 3, 5], "landau": 2, "kamil": [2, 3], "lukosuit": 2, "michael": [2, 3, 5, 6], "sellitto": 2, "schiefer": 2, "noemi": 2, "mercado": 2, "robert": [2, 3], "lasenbi": 2, "robin": 2, "larson": 2, "ringer": 2, "tamera": 2, "lanham": 2, "timothi": [2, 3], "telleen": 2, "lawton": 2, "samuel": [2, 3, 5], "bowman": [2, 3], "2212": 2, "08073": 2, "blo23": 2, "announc": [2, 3], "cc": 2, "11": [2, 3, 5], "ccl": [2, 5], "24": [2, 3, 5, 6], "guim": 2, "hardi": 2, "shunian": 2, "zich": 2, "liu": [2, 3, 5, 6], "feng": [2, 5], "jiang": [2, 3, 5], "benyou": 2, "wang": [2, 3, 5], "judgement": 2, "2402": 2, "10669": 2, "dphz23": [2, 5], "tim": [2, 5], "artidoro": [2, 5], "pagnoni": [2, 5], "ari": [2, 3, 5], "holtzman": [2, 3, 5], "luke": [2, 3, 5], "zettlemoy": [2, 5], "2305": [2, 5], "14314": [2, 5], "ddz": 2, "qingxiu": 2, "xingx": 2, "zhang": [2, 3, 5], "zhifang": 2, "sui": 2, "furu": 2, "wei": [2, 3, 5], "boost": 2, "2410": [2, 5], "06961": 2, "fac24": [2, 3], "huggingfaceh4": 2, "fac4c": 2, "fac4d": 2, "doc": [2, 3, 4, 6], "en": [2, 3, 5, 6], "h44a": 2, "binari": [2, 3], "h44b": 2, "hhj": 2, "shuang": 2, "wenfeng": 2, "han": [2, 3, 5], "tao": [2, 3, 5], "yipe": 2, "haonan": 2, "chunlin": 2, "zhong": [2, 5], "zhangjun": 2, "zhou": [2, 3, 5], "tang": [2, 3, 5], "2401": [2, 3], "01629": 2, "hlt24": 2, "jiwoo": 2, "noah": [2, 3, 5], "lee": [2, 3, 5, 6], "jame": [2, 3, 5], "thorn": 2, "orpo": 2, "monolith": 2, "2403": [2, 3], "07691": 2, "hsw": [2, 5], "21": [2, 3, 5], "edward": [2, 3, 5], "j": [2, 3, 5, 6], "yelong": [2, 5], "shen": [2, 3, 5], "phillip": [2, 5], "walli": [2, 5], "zeyuan": [2, 5], "allen": [2, 3, 5], "zhu": [2, 3, 5], "yuanzhi": [2, 5], "shean": [2, 5], "lu": [2, 3, 5], "weizhu": [2, 5], "2106": [2, 5], "09685": [2, 5], "hgh": 2, "jiaxin": 2, "shixiang": [2, 3, 5], "shane": [2, 3, 5], "gu": [2, 3, 5], "le": [2, 3], "hou": [2, 3], "yuexin": 2, "xuezhi": 2, "hongkun": 2, "yu": [2, 3, 5], "jiawei": 2, "2210": [2, 5], "11610": 2, "huy24": 2, "chip": 2, "reilli": 2, "media": [2, 3, 5], "decemb": [2, 3], "isbn": [2, 3], "9781098129095": 2, "www": [2, 3, 5], "oreilli": 2, "ksy": 2, "seungon": 2, "juyoung": 2, "suk": 2, "xiang": [2, 3], "yue": 2, "vijai": 2, "viswanathan": 2, "seongyun": 2, "yizhong": 2, "kiril": 2, "gashteovski": 2, "carolin": [2, 5], "lawrenc": 2, "sean": [2, 3, 5], "welleck": 2, "graham": 2, "neubig": 2, "2412": 2, "03679": 2, "lt24": 2, "herd": 2, "2407": [2, 3, 5], "21783": 2, "lwx": 2, "lin": [2, 3, 5, 6], "rui": [2, 3, 6], "ruixuan": 2, "xiao": [2, 5], "junbo": 2, "zhao": [2, 3, 5], "ding": 2, "gang": 2, "haobo": 2, "driven": [2, 3, 5], "survei": [2, 3, 5, 6], "2406": [2, 3, 5], "15126": 2, "met24": 2, "owj": 2, "jeff": [2, 3, 5], "diogo": [2, 5], "almeida": [2, 5], "carrol": [2, 5], "wainwright": [2, 5], "pamela": [2, 3, 5], "mishkin": [2, 3, 5], "chong": [2, 5], "sandhini": [2, 5], "agarw": [2, 3, 5], "katarina": [2, 5], "slama": [2, 5], "alex": [2, 3, 5], "rai": [2, 3, 5], "john": [2, 3, 5], "jacob": [2, 3, 5], "hilton": [2, 3], "fraser": 2, "kelton": 2, "miller": [2, 3], "maddi": [2, 5], "simen": [2, 5], "peter": [2, 3, 5], "welind": [2, 3, 5], "paul": [2, 3, 5], "christiano": [2, 5], "jan": [2, 3, 5], "leik": [2, 3, 5], "ryan": [2, 3, 5], "2203": 2, "02155": 2, "qwe24": 2, "rsm": [2, 5], "rafael": [2, 5], "archit": [2, 5], "sharma": [2, 5], "eric": [2, 3, 5], "mitchel": [2, 5], "stefano": [2, 3, 5], "ermon": [2, 3, 5], "man": [2, 3, 5], "chelsea": [2, 5], "finn": [2, 5], "secretli": [2, 5], "18290": [2, 5], "swd": 2, "17": [2, 3], "filip": [2, 5], "wolski": 2, "prafulla": 2, "dhariw": 2, "alec": [2, 3, 5], "radford": [2, 3, 5], "oleg": [2, 5], "klimov": 2, "1707": 2, "06347": 2, "smollm224": 2, "distil": 2, "post": [2, 3, 5, 6], "smollm2360mi24": 2, "sou24": 2, "html": [2, 4, 6], "tm": 2, "23": [2, 3, 5], "hugo": 2, "loui": [2, 3], "martin": [2, 3, 5], "kevin": [2, 3, 5], "stone": 2, "albert": 2, "amjad": 2, "almahairi": 2, "yasmin": 2, "babaei": 2, "nikolai": 2, "bashlykov": 2, "soumya": 2, "batra": 2, "prajjwal": 2, "bhargava": 2, "shruti": 2, "bhosal": 2, "dan": [2, 3], "bikel": 2, "luka": 2, "blecher": 2, "cristian": 2, "canton": 2, "ferrer": 2, "moya": 2, "guillem": 2, "cucurul": 2, "david": [2, 3, 5], "esiobu": 2, "jude": 2, "fernand": 2, "jeremi": [2, 3], "fu": 2, "wenyin": 2, "brian": [2, 5], "fuller": [2, 5], "cynthia": 2, "gao": [2, 3, 5], "vedanuj": 2, "goswami": [2, 5], "naman": 2, "goyal": 2, "anthoni": 2, "hartshorn": 2, "saghar": 2, "hosseini": 2, "hakan": 2, "inan": 2, "marcin": 2, "karda": 2, "viktor": 2, "kerkez": 2, "madian": 2, "khabsa": 2, "isabel": [2, 5], "kloumann": 2, "artem": 2, "korenev": 2, "punit": 2, "singh": [2, 3], "koura": 2, "mari": [2, 3, 5], "ann": [2, 5], "lachaux": 2, "thibaut": 2, "lavril": 2, "jenya": 2, "diana": [2, 3], "liskovich": 2, "yinghai": 2, "yune": 2, "mao": 2, "xavier": 2, "martinet": 2, "todor": [2, 5], "mihaylov": 2, "pushkar": 2, "mishra": [2, 3], "igor": [2, 3, 5], "molybog": 2, "yixin": 2, "nie": [2, 3], "andrew": [2, 3, 5], "poulton": 2, "reizenstein": 2, "rashi": 2, "rungta": 2, "kalyan": 2, "saladi": 2, "alan": [2, 5], "schelten": 2, "ruan": 2, "silva": 2, "smith": [2, 3], "ranjan": 2, "subramanian": 2, "xiaoq": 2, "ellen": 2, "tan": [2, 3], "binh": 2, "ross": [2, 5], "taylor": 2, "adina": [2, 5], "william": [2, 3, 5], "jian": [2, 3], "kuan": 2, "puxin": 2, "zheng": [2, 3, 5], "yan": [2, 3], "iliyan": 2, "zarov": 2, "yuchen": [2, 3, 5], "angela": [2, 3, 5], "fan": [2, 3], "melani": 2, "kambadur": 2, "sharan": 2, "narang": 2, "aurelien": 2, "rodriguez": 2, "stojnic": 2, "sergei": 2, "edunov": 2, "thoma": [2, 3, 5], "scialom": 2, "2307": [2, 6], "09288": 2, "vaa": [2, 5], "berti": [2, 5], "adarsh": [2, 5], "agraw": [2, 5], "ahm": [2, 5], "victor": [2, 5], "akinwand": [2, 5], "namir": [2, 5], "nuaimi": [2, 5], "najla": [2, 5], "alfaraj": [2, 5], "alhajjar": [2, 5], "aroyo": [2, 5], "trupti": [2, 5], "bavalatti": [2, 5], "max": [2, 3, 5], "bartolo": [2, 5], "borhan": [2, 5], "blili": [2, 5], "hamelin": [2, 5], "kurt": [2, 5], "bollack": [2, 5], "rishi": [2, 3, 5], "bomassani": [2, 5], "marisa": [2, 5], "ferrara": [2, 5], "boston": [2, 5], "sim\u00e9on": [2, 5], "campo": [2, 5], "kal": [2, 5], "chakra": [2, 5], "canyu": [2, 5], "codi": [2, 5], "coleman": [2, 5], "zachari": [2, 3, 5], "delpierr": [2, 5], "coudert": [2, 5], "leon": [2, 5], "derczynski": [2, 5], "debojyoti": [2, 5], "dutta": [2, 5], "ian": [2, 3, 5], "eisenberg": [2, 5], "ezick": [2, 5], "heather": [2, 5], "frase": [2, 5], "ram": [2, 5], "gandikota": [2, 5], "agasthya": [2, 5], "gangavarapu": [2, 5], "ananya": [2, 3, 5], "geali": [2, 5], "rajat": [2, 5], "ghosh": [2, 3, 5], "goel": [2, 5], "usman": [2, 5], "gohar": [2, 5], "sujata": [2, 5], "hale": [2, 5], "wiebk": [2, 5], "hutiri": [2, 5], "marvin": [2, 5], "imperi": [2, 5], "surgan": [2, 5], "jandial": [2, 5], "nick": [2, 3, 5], "judd": [2, 5], "felix": [2, 3, 5], "juefei": [2, 5], "fouts": [2, 5], "khomh": [2, 5], "bhavya": [2, 5], "kailkhura": [2, 5], "hannah": [2, 3, 5], "rose": [2, 5], "kirk": [2, 5], "klyman": [2, 5], "knotz": [2, 5], "kuchnik": [2, 5], "shachi": [2, 5], "kumar": [2, 3, 5], "srijan": [2, 5], "lengerich": [2, 5], "bo": [2, 3, 5], "zeyi": [2, 5], "liao": [2, 3, 5], "eileen": [2, 5], "sarah": [2, 3, 5], "luger": [2, 5], "yifan": [2, 3, 5], "priyanka": [2, 5], "mammen": [2, 5], "kelvin": [2, 5], "manyeki": [2, 5], "mcgregor": [2, 5], "virendra": [2, 5], "mehta": [2, 3, 5], "shafe": [2, 5], "moham": [2, 5], "emanuel": [2, 3, 5], "moss": [2, 5], "lama": [2, 5], "nachman": [2, 5], "dinesh": [2, 5], "jinenh": [2, 5], "naganna": [2, 5], "amin": [2, 5], "nikanjam": [2, 5], "besmira": [2, 5], "nushi": [2, 5], "lui": [2, 3, 5], "oala": [2, 5], "iftach": [2, 5], "orr": [2, 3, 5], "alicia": [2, 3, 5], "parrish": [2, 3, 5], "cigdem": [2, 5], "patlak": [2, 5], "pietri": [2, 5], "forough": [2, 5], "poursabzi": [2, 5], "sangdeh": [2, 5], "eleonora": [2, 5], "presani": [2, 5], "fabrizio": [2, 5], "puletti": [2, 5], "r\u00f6ttger": [2, 5], "sahai": [2, 5], "santo": [2, 5], "nino": [2, 5], "scherrer": [2, 5], "alic": [2, 3, 5, 6], "schoenauer": [2, 5], "sebag": [2, 5], "patrick": [2, 5], "schramowski": [2, 5], "abolfazl": [2, 5], "shahbazi": [2, 5], "vin": [2, 5], "xudong": [2, 3, 5], "vamsi": [2, 5], "sistla": [2, 5], "leonard": [2, 5], "testuggin": [2, 5], "vithursan": [2, 5], "thangarasa": [2, 5], "elizabeth": [2, 3, 5], "watkin": [2, 5], "rebecca": [2, 5], "weiss": [2, 5], "welti": [2, 5], "tyler": [2, 3, 5], "wilber": [2, 5], "jean": [2, 5], "poonam": [2, 5], "yadav": [2, 5], "xianjun": [2, 5], "yang": [2, 3, 5], "yi": [2, 3, 5, 6], "zeng": [2, 5], "wenhui": [2, 5], "fedor": [2, 5], "zhdanov": [2, 5], "jiacheng": [2, 3, 5], "perci": [2, 3, 5], "liang": [2, 3, 5], "mattson": [2, 5], "joaquin": [2, 5], "vanschoren": [2, 5], "v0": [2, 5], "2404": [2, 3, 5], "12241": [2, 5], "wyg": 2, "tianhao": [2, 3, 5], "weizh": 2, "yuan": [2, 3, 5], "olga": 2, "golovneva": 2, "jing": 2, "yuandong": 2, "tian": 2, "jiantao": 2, "jiao": 2, "jason": [2, 3, 5], "weston": 2, "sainbayar": 2, "sukhbaatar": 2, "19594": 2, "xfg": 2, "shusheng": 2, "jiaxuan": 2, "wenji": 2, "ye": [2, 3, 5, 6], "weilin": 2, "zhiyu": 2, "mei": [2, 3], "guangju": 2, "chao": 2, "10719": 2, "ywx": 2, "yueqin": 2, "zhendong": 2, "yujia": 2, "xie": [2, 3], "mingyuan": 2, "paradigm": [2, 3], "semanticscholar": 2, "corpusid": 2, "270199610": 2, "doesn": [3, 4, 6], "matter": 3, "beauti": 3, "smart": 3, "agre": 3, "wrong": 3, "richard": [3, 5], "feynman": 3, "advent": 3, "shift": 3, "norm": 3, "realm": 3, "convent": [3, 5], "mere": 3, "evolut": 3, "conceiv": 3, "entrench": 3, "seem": [3, 6], "daunt": 3, "ignor": 3, "relianc": [3, 5], "outdat": [3, 6], "probabilist": 3, "inevit": 3, "setback": 3, "imper": 3, "embrac": 3, "proactiv": [3, 5], "mindset": 3, "front": 3, "produc": [3, 5, 6], "novel": 3, "data": [3, 4, 6], "respons": [3, 4, 5, 6], "ident": 3, "isn": 3, "bug": 3, "random": [3, 5, 6], "testabl": 3, "exceedingli": 3, "complianc": [3, 5, 6], "guarante": [3, 6], "user": [3, 4, 5], "trust": [3, 5, 6], "affect": [3, 5], "primari": [3, 5], "nucleu": 3, "2020": 3, "summari": [3, 5, 6], "alter": 3, "rigid": 3, "wildli": 3, "incoher": 3, "inadequ": [3, 5], "temp": 3, "df_result": 3, "ntemperatur": 3, "40": 3, "temp_respons": 3, "iterrow": 3, "10000": [3, 4, 6], "appl": [3, 4, 6], "sec_fil": [3, 6], "nsecur": 3, "AND": [3, 6], "exchang": [3, 4, 5, 6], "commiss": [3, 4, 5, 6], "nwashington": 3, "20549": 3, "nform": 3, "annual": [3, 5], "pursuant": 3, "TO": 3, "13": [3, 5], "OR": 3, "OF": 3, "THE": 3, "1934": 3, "nfor": 3, "fiscal": [3, 4], "septemb": [3, 4], "28": [3, 4], "nor": 3, "period": [3, 4, 5], "ncommiss": 3, "001": 3, "36743": 3, "ng66145g66i43": 3, "jpg": 3, "nappl": 3, "exact": [3, 5], "registr": 3, "specifi": [3, 4, 6], "charter": 3, "ncalifornia": 3, "t94": 3, "2404110": 3, "jurisdict": 3, "nof": 3, "incorpor": [3, 5], "employ": 3, "identif": [3, 5], "park": 3, "ncupertino": 3, "california": [3, 5, 6], "n95014": 3, "princip": 3, "offic": [3, 5], "408": 3, "996": 3, "1010": 3, "telephon": 3, "area": [3, 5, 6], "regist": 3, "ntitl": 3, "ttrade": 3, "symbol": 3, "tname": 3, "ncommon": 3, "stock": [3, 6], "00001": 3, "naapl": 3, "tthe": 3, "nasdaq": [3, 6], "market": [3, 4, 6], "llc": [3, 6], "n0": 3, "000": [3, 6], "2025": 3, "875": 3, "625": 3, "2026": 3, "2027": 3, "375": 3, "2029": 3, "050": 3, "2031": 3, "600": 3, "2042": 3, "nindic": 3, "season": 3, "issuer": 3, "405": 3, "nye": 3, "preced": 3, "shorter": 3, "past": [3, 5], "90": 3, "submit": 3, "electron": 3, "232": 3, "acceler": [3, 5], "filer": 3, "growth": 3, "12b": [3, 5], "nlarg": 3, "tacceler": 3, "nnon": 3, "tsmaller": 3, "nemerg": 3, "nif": 3, "elect": 3, "revis": [3, 5], "attest": 3, "404": 3, "sarban": 3, "oxlei": 3, "7262": 3, "firm": [3, 5], "prepar": [3, 4, 5], "correct": [3, 6], "restat": 3, "recoveri": 3, "incent": 3, "compens": 3, "240": 3, "10d": 3, "shell": 3, "aggreg": 3, "vote": 3, "held": [3, 6], "affili": [3, 6], "march": [3, 6], "29": [3, 6], "last": [3, 4, 6], "second": [3, 4], "quarter": 3, "approxim": [3, 6], "628": [3, 6], "553": [3, 6], "sole": [3, 5], "disclosur": [3, 5], "director": [3, 5], "date": [3, 6], "exclud": 3, "n15": 3, "115": [3, 6], "823": [3, 6], "outstand": [3, 6], "octob": [3, 6], "18": [3, 5, 6], "ndocument": 3, "BY": 3, "nportion": 3, "proxi": 3, "meet": [3, 5, 6], "sharehold": 3, "iii": 3, "120": 3, "ntabl": 3, "npage": 3, "npart": 3, "nitem": 3, "nbusi": 3, "1a": 3, "nrisk": 3, "1b": 3, "nunresolv": 3, "staff": 3, "comment": 3, "n17": 3, "1c": 3, "ncybersecur": 3, "nproperti": 3, "n18": 3, "nlegal": 3, "proceed": [3, 5], "nmine": 3, "ii": [3, 6], "nmarket": 3, "stockhold": 3, "purchas": 3, "n19": 3, "reserv": 3, "n20": 3, "nmanag": 3, "n21": 3, "7a": 3, "nquantit": 3, "n27": 3, "nfinanci": 3, "supplementari": 3, "n28": 3, "nchang": 3, "disagr": 3, "n51": 3, "9a": 3, "ncontrol": 3, "procedur": [3, 5], "9b": 3, "nother": 3, "n52": 3, "9c": 3, "ndisclosur": 3, "foreign": 3, "ndirector": 3, "corpor": [3, 5], "nexecut": 3, "ownership": 3, "certain": [3, 4, 5, 6], "benefici": 3, "owner": 3, "ncertain": 3, "transact": [3, 5], "nprincip": 3, "fee": 3, "iv": 3, "nexhibit": 3, "n53": 3, "n56": 3, "nthi": 3, "forward": [3, 5], "litig": 3, "reform": 3, "1995": 3, "uncertainti": 3, "event": 3, "assumpt": 3, "macroeconom": 3, "anticip": [3, 5], "caus": [3, 5], "oblig": [3, 4], "nunless": 3, "herein": 3, "calendar": 3, "wholli": 3, "subsidiari": 3, "unless": 3, "ncompani": 3, "manufactur": 3, "smartphon": 3, "tablet": 3, "wearabl": [3, 6], "accessori": 3, "sell": 3, "varieti": 3, "52": 3, "53": 3, "week": 3, "saturdai": 3, "nproduct": 3, "niphon": 3, "io": [3, 6], "iphon": [3, 6], "pro": [3, 4, 5], "se": 3, "nmac": 3, "maco": 3, "mac": [3, 6], "laptop": 3, "macbook": 3, "air": 3, "desktop": 3, "imac": 3, "studio": 3, "nipad": 3, "multipurpos": 3, "ipado": 3, "ipad": [3, 6], "nwearabl": 3, "home": 3, "smartwatch": 3, "wireless": 3, "headphon": 3, "spatial": 3, "watcho": 3, "watch": 3, "ultra": 3, "airpod": 3, "beat": 3, "visiono": 3, "nhome": 3, "tv": 3, "stream": [3, 6], "tvo": 3, "homepod": 3, "fidel": [3, 6], "naccessori": 3, "brand": 3, "third": 3, "parti": 3, "nservic": 3, "nadvertis": 3, "advertis": 3, "licens": 3, "napplecar": 3, "portfolio": [3, 6], "applecar": 3, "prioriti": 3, "network": [3, 6], "repair": 3, "addit": [3, 4, 6], "coverag": [3, 5], "accident": 3, "damag": [3, 5], "theft": [3, 5], "loss": [3, 5], "ncloud": 3, "ndigit": 3, "app": 3, "discov": [3, 5], "download": 3, "music": 3, "podcast": 3, "subscript": 3, "arcad": 3, "sm": 3, "listen": 3, "radio": 3, "station": 3, "magazin": 3, "exclus": 3, "sport": 3, "npayment": 3, "payment": 3, "credit": 3, "pai": 3, "cashless": 3, "nsegment": 3, "primarili": [3, 5], "geograph": 3, "basi": 3, "segment": [3, 4, 6], "america": 3, "europ": 3, "china": [3, 5], "japan": 3, "rest": 3, "asia": 3, "pacif": 3, "north": 3, "south": 3, "european": [3, 5], "india": 3, "middl": 3, "east": 3, "africa": 3, "mainland": 3, "kong": 3, "taiwan": 3, "australia": 3, "asian": 3, "although": 3, "partner": [3, 5], "mid": [3, 4], "enterpris": [3, 6], "resel": 3, "retail": 3, "sale": 3, "indirect": 3, "channel": 3, "cellular": 3, "carrier": 3, "net": [3, 6], "38": 3, "62": 3, "ncompetit": 3, "competit": [3, 5], "character": [3, 5], "price": 3, "downward": 3, "pressur": [3, 5], "gross": [3, 5], "margin": [3, 6], "life": [3, 5], "cycl": 3, "industri": [3, 5, 6], "characterist": [3, 5], "competitor": 3, "compet": 3, "imit": 3, "infring": 3, "intellectu": [3, 5], "innov": [3, 4, 5], "marketplac": 3, "nearli": 3, "reput": 3, "expand": [3, 5], "opportun": 3, "substanti": 3, "broader": [3, 5], "illegitim": 3, "collabor": [3, 5], "nsuppli": 3, "nalthough": 3, "essenti": [3, 4, 5, 6], "particip": 3, "shortag": 3, "commod": 3, "fluctuat": 3, "commonli": 3, "capac": 3, "until": [3, 6], "supplier": 3, "matur": 3, "concentr": 3, "enter": 3, "agreement": 3, "suppli": [3, 6], "renew": 3, "nresearch": 3, "nbecaus": 3, "upon": [3, 4, 5], "flow": [3, 4], "acquisit": [3, 5], "nintellectu": 3, "broad": [3, 6], "patent": 3, "copyright": 3, "trademark": 3, "secret": 3, "differenti": 3, "skill": [3, 5], "personnel": 3, "regularli": 3, "aris": [3, 5], "pursu": [3, 5], "thousand": 3, "durat": 3, "adequ": [3, 5], "nin": 3, "holidai": [3, 5], "fill": 3, "inventori": 3, "older": 3, "newer": 3, "distributor": 3, "nhuman": 3, "capit": [3, 4, 6], "strive": 3, "retain": [3, 4, 5], "talent": 3, "member": 3, "164": 3, "equival": 3, "ncompens": 3, "benefit": [3, 5, 6], "equit": 3, "thrive": [3, 6], "succe": 3, "health": 3, "awai": 3, "ngrowth": 3, "career": 3, "leadership": [3, 5], "influenc": [3, 6], "nworkplac": 3, "equal": 3, "workplac": 3, "ninclus": 3, "sustain": 3, "workforc": 3, "represent": [3, 4], "nengag": 3, "among": 3, "gaug": 3, "sentiment": [3, 6], "nhealth": 3, "everywher": 3, "crisi": 3, "put": 3, "visitor": 3, "navail": 3, "quarterli": 3, "q": 3, "amend": 3, "sec": [3, 4, 6], "Such": [3, 5], "charg": 3, "investor": [3, 6], "aspx": 3, "websit": 3, "press": 3, "environment": [3, 5], "referenc": 3, "inact": 3, "textual": 3, "unknown": [3, 5], "advers": 3, "trend": [3, 6], "conjunct": 3, "consolid": 3, "accompani": 3, "nmacroeconom": 3, "econom": 3, "chain": [3, 4], "facil": 3, "assembli": 3, "site": 3, "nadvers": 3, "slow": 3, "recess": 3, "unemploy": 3, "inflat": 3, "tighter": 3, "currenc": 3, "spend": 3, "monetari": 3, "asset": [3, 5], "contract": 3, "logist": 3, "instabl": [3, 5], "inabl": 3, "financ": 3, "insolv": 3, "failur": [3, 5], "deriv": 3, "counterparti": 3, "debt": 3, "liquid": [3, 4], "fair": [3, 5], "instrument": 3, "polit": 3, "disput": 3, "geopolit": 3, "tension": [3, 5], "terror": 3, "disast": 3, "accid": 3, "interrupt": 3, "npolit": 3, "whole": 3, "outsourc": 3, "korea": 3, "vietnam": 3, "restrict": [3, 5, 6], "tariff": 3, "export": 3, "portion": 3, "revenu": [3, 4, 6], "restructur": 3, "ceas": 3, "disrupt": [3, 4], "escal": [3, 4, 5], "nmani": 3, "prone": 3, "earthquak": 3, "climat": 3, "weather": 3, "occur": [3, 5], "plant": 3, "terrorist": [3, 5], "attack": [3, 5], "hostil": 3, "ransomwar": 3, "cybersecur": [3, 5], "labor": 3, "beyond": 3, "nsuch": 3, "imposs": 3, "slowdown": 3, "outag": 3, "neg": [3, 6], "pandem": 3, "covid": 3, "19": 3, "economi": 3, "imposit": 3, "stringent": [3, 5], "travel": 3, "freight": 3, "movement": 3, "ramp": 3, "nfollow": 3, "expenditur": 3, "resum": 3, "exacerb": 3, "insur": 3, "insuffici": 3, "nglobal": 3, "unabl": 3, "assur": [3, 5], "minor": 3, "naddition": 3, "intensifi": 3, "seamlessli": [3, 4], "nto": 3, "stimul": 3, "ndue": 3, "upgrad": 3, "quantiti": 3, "defect": 3, "defici": 3, "supersed": 3, "nsubstanti": 3, "much": 3, "transport": 3, "diminish": 3, "provis": 3, "reimburs": 3, "warranti": 3, "unanticip": 3, "liabil": 3, "final": [3, 4, 5, 6], "finish": 3, "destin": 3, "made": [3, 4, 6], "prepay": 3, "termin": 3, "recover": 3, "exposur": [3, 5], "nfutur": 3, "semiconductor": 3, "suffer": 3, "poor": 3, "constrain": [3, 4, 6], "shipment": 3, "unexpectedli": 3, "interfer": 3, "unsaf": [3, 5], "expos": 3, "fix": [3, 4, 5], "widespread": [3, 5], "vulner": [3, 5], "compromis": [3, 5], "claim": [3, 5], "modif": [3, 5], "intang": 3, "lost": [3, 4], "cancel": 3, "obsolet": 3, "exce": 3, "realiz": 3, "accru": 3, "excess": 3, "impair": 3, "whenev": 3, "circumst": 3, "amount": [3, 4, 5, 6], "carri": [3, 6], "incur": 3, "unpredict": [3, 6], "pace": [3, 5], "obsolesc": 3, "forecast": [3, 5], "incorrectli": [3, 6], "extens": [3, 4, 6], "issuanc": 3, "unknowingli": 3, "notifi": 3, "preclud": 3, "bui": 3, "percept": 3, "android": 3, "playstat": 3, "nintendo": 3, "xbox": 3, "inclin": 3, "devot": 3, "compel": [3, 6], "dissatisfi": 3, "vast": [3, 5], "storefront": 3, "mechan": [3, 5, 6], "safari": 3, "union": [3, 5], "eu": [3, 5], "dma": 3, "reduct": 3, "narrow": 3, "scope": [3, 4, 5], "elimin": 3, "nfailur": 3, "appeal": 3, "subscrib": 3, "nsome": 3, "manner": [3, 4, 5, 6], "nurtur": 3, "nmuch": 3, "chief": 3, "silicon": 3, "vallei": 3, "constantli": 3, "driver": 3, "recruit": 3, "subsidi": 3, "staf": 3, "contractor": 3, "placement": 3, "increment": 3, "weaken": 3, "telecommun": 3, "war": 3, "virus": 3, "ins": 3, "incid": [3, 5], "redund": 3, "ineffect": 3, "thing": [3, 6], "interf": 3, "imped": 3, "ship": 3, "nloss": 3, "unauthor": [3, 5], "confidenti": 3, "encrypt": 3, "But": [3, 5, 6], "malici": [3, 5], "behalf": 3, "normal": [3, 5, 6], "investig": 3, "penalti": 3, "frequenc": [3, 4], "actor": [3, 5], "circumv": [3, 4, 5], "obfusc": 3, "forens": 3, "hinder": [3, 6], "recov": 3, "perpetr": 3, "profil": 3, "authent": 3, "hack": [3, 5], "malfeas": 3, "faulti": 3, "password": 3, "irregular": 3, "fraudul": 3, "induc": 3, "disclos": [3, 4, 6], "usernam": 3, "turn": 3, "multifactor": 3, "unusu": 3, "freez": 3, "suspici": 3, "nwhile": 3, "ninvest": 3, "contempl": 3, "endeavor": 3, "distract": 3, "tangibl": 3, "approv": 3, "oner": 3, "ventur": 3, "riski": 3, "leas": 3, "unfavor": 3, "arisen": 3, "ordinari": 3, "cours": [3, 5], "resolv": [3, 5], "sometim": [3, 6], "indemnif": 3, "indemnifi": 3, "alleg": 3, "magnitud": 3, "assert": 3, "royalti": 3, "vigor": 3, "defend": 3, "court": 3, "internation": 3, "plaintiff": 3, "injunct": 3, "relief": 3, "nregardless": 3, "merit": 3, "recognit": 3, "settl": 3, "uncertain": 3, "disgorg": 3, "remedi": [3, 5], "worldwid": 3, "antitrust": 3, "bill": 3, "commerc": 3, "mobil": [3, 6], "televis": 3, "film": 3, "anticorrupt": 3, "cash": [3, 4], "repatri": 3, "anti": 3, "launder": 3, "tax": 3, "wast": 3, "recycl": 3, "ncomplianc": 3, "impos": [3, 5, 6], "agent": 3, "nregulatori": 3, "ban": 3, "nexpect": 3, "increasingli": [3, 5, 6], "greenhous": 3, "ga": 3, "emiss": 3, "civil": 3, "disagre": 3, "perceiv": 3, "feder": 3, "scrutini": [3, 5], "nfrom": 3, "engag": [3, 5, 6], "noncompli": 3, "individu": [3, 4, 5], "lawsuit": 3, "monopol": 3, "nfurther": 3, "earn": 3, "googl": [3, 6], "search": 3, "nthere": 3, "retent": 3, "transfer": 3, "pass": [3, 5, 6], "pend": 3, "inquiri": 3, "government": 3, "entiti": [3, 6], "biometr": 3, "notif": 3, "permit": [3, 6], "healthcar": 3, "liabl": 3, "investigatori": 3, "cardhold": 3, "compress": [3, 4], "acquir": 3, "extent": 3, "unexpect": [3, 6], "dollar": 3, "denomin": 3, "offset": 3, "strengthen": [3, 5], "nconvers": 3, "therebi": [3, 4], "thu": 3, "hedg": 3, "deterior": 3, "sovereign": 3, "heighten": [3, 5], "worsen": 3, "A": [3, 4, 5, 6], "collater": 3, "bank": 3, "unsecur": 3, "subassembli": 3, "assembl": 3, "legisl": 3, "ireland": [3, 5], "singapor": 3, "organis": 3, "statutori": 3, "valuat": 3, "defer": 3, "bodi": [3, 5], "adequaci": 3, "ow": 3, "ngener": 3, "volum": [3, 4, 5], "repurchas": 3, "dividend": 3, "consumm": 3, "declar": 3, "board": [3, 5], "unresolv": 3, "nnone": 3, "threat": [3, 5], "postur": 3, "25": 3, "2016": 3, "coordin": [3, 5], "track": [3, 5], "committe": 3, "oversight": [3, 5], "counsel": 3, "chair": 3, "headquart": 3, "cupertino": [3, 6], "center": [3, 5, 6], "formal": [3, 6], "conclud": 3, "uninstal": 3, "web": 3, "browser": 3, "june": 3, "contractu": 3, "desist": 3, "stai": 3, "grant": 3, "ndepart": 3, "justic": 3, "depart": [3, 5], "doj": 3, "district": 3, "attornei": 3, "jersei": 3, "redress": [3, 5], "anticompetit": 3, "nonmonetari": 3, "defens": [3, 5], "nepic": 3, "epic": 3, "northern": 3, "unfair": [3, 5], "enjoin": 3, "extern": [3, 5], "januari": 3, "motion": 3, "oppos": 3, "30": 3, "vacat": 3, "fourth": 3, "mine": 3, "nnot": 3, "aapl": 3, "nholder": 3, "na": 3, "301": 3, "npurchas": 3, "nshare": 3, "nperiod": 3, "ttotal": 3, "taverag": 3, "npaid": 3, "nannounc": 3, "napproxim": 3, "That": [3, 5, 6], "Be": 3, "nunder": 3, "njune": 3, "august": [3, 5], "nopen": 3, "negoti": 3, "t35": 3, "697": 3, "t224": 3, "naugust": 3, "31": 3, "t42": 3, "910": 3, "t221": 3, "39": 3, "nseptemb": 3, "t33": 3, "653": 3, "t222": 3, "86": 3, "ntotal": 3, "t112": 3, "260": 3, "t89": 3, "074": 3, "110": 3, "billion": 3, "previou": [3, 4, 6], "10b5": 3, "graph": 3, "cumul": 3, "reinvest": 3, "dow": 3, "supersector": 3, "27": 3, "2019": 3, "n2218": 3, "tseptemb": 3, "t100": 3, "t207": 3, "t273": 3, "t281": 3, "t322": 3, "t430": 3, "t113": 3, "t156": 3, "t131": 3, "t155": 3, "t210": 3, "ndow": 3, "t146": 3, "t216": 3, "t215": 3, "nfirst": 3, "nsecond": 3, "nthird": 3, "sequoia": 3, "nfourth": 3, "plu": 3, "nfiscal": 3, "six": 3, "realign": 3, "span": 3, "wherea": 3, "indirectli": 3, "n2024": 3, "tchang": 3, "t2023": 3, "t2022": 3, "namerica": 3, "t167": 3, "045": 3, "t3": 3, "t162": 3, "560": 3, "t169": 3, "658": 3, "neurop": 3, "t101": 3, "328": 3, "t7": 3, "294": 3, "t95": 3, "118": 3, "ngreater": 3, "t66": 3, "952": 3, "t72": 3, "559": 3, "t74": 3, "njapan": 3, "t25": 3, "052": 3, "t24": 3, "257": 3, "977": 3, "nrest": 3, "t30": 3, "t4": 3, "t29": 3, "615": 3, "t1": 3, "t391": 3, "035": 3, "t2": 3, "t383": 3, "285": 3, "t394": 3, "weak": [3, 5], "renminbi": 3, "yen": [3, 6], "t201": 3, "183": 3, "t200": 3, "583": 3, "t205": 3, "489": 3, "984": 3, "357": 3, "t40": 3, "177": 3, "t26": 3, "694": 3, "t28": 3, "300": [3, 4], "292": 3, "t37": 3, "005": 3, "t39": 3, "845": [3, 5], "t41": 3, "241": 3, "n96": 3, "169": 3, "t13": 3, "t85": 3, "t9": 3, "t78": 3, "129": 3, "amort": 3, "bundl": 3, "flat": 3, "ngross": 3, "t109": 3, "633": 3, "t108": 3, "803": 3, "t114": 3, "728": 3, "t71": 3, "t60": 3, "345": 3, "t56": 3, "054": 3, "t180": 3, "683": 3, "148": 3, "t170": 3, "782": 3, "t36": 3, "t73": 3, "t70": 3, "t46": 3, "t44": 3, "t43": 3, "noper": 3, "t31": 3, "370": 3, "t5": 3, "915": 3, "t14": 3, "251": 3, "npercentag": 3, "t8": 3, "nsell": 3, "administr": 3, "097": 3, "932": 3, "094": 3, "t6": 3, "t57": 3, "467": 3, "t54": 3, "847": 3, "t51": 3, "t15": 3, "headcount": 3, "nprovis": 3, "749": 3, "t16": 3, "741": 3, "t19": 3, "neffect": 3, "nstatutori": 3, "t21": 3, "aid": 3, "nliquid": 3, "unrestrict": 3, "140": 3, "ndebt": 3, "97": 3, "payabl": 3, "promissori": 3, "nleas": 3, "space": [3, 5], "nmanufactur": 3, "noncancel": 3, "ndeem": 3, "tcja": 3, "paid": 3, "nstate": 3, "fund": 3, "escrow": 3, "ncapit": 3, "95": 3, "nrecent": 3, "pronounc": 3, "nincom": 3, "fasb": 3, "asu": 3, "09": [3, 4, 5], "740": 3, "reconcili": 3, "reconcil": [3, 6], "disaggreg": 3, "prospect": 3, "novemb": [3, 5], "07": [3, 4, 5, 6], "280": 3, "maker": 3, "codm": 3, "alloc": [3, 5], "retrospect": 3, "ncritic": 3, "conform": [3, 6], "gaap": 3, "nuncertain": 3, "domest": 3, "taxat": 3, "resolut": 3, "conting": 3, "26": 3, "still": [3, 5], "ninterest": 3, "forth": 3, "hypothet": 3, "nsensit": 3, "nhypothet": 3, "nrate": 3, "npotenti": 3, "n100": 3, "tenor": 3, "ndeclin": 3, "755": 3, "089": 3, "nterm": 3, "nincreas": 3, "t139": 3, "t194": 3, "nforeign": 3, "express": [3, 6], "var": 3, "mont": 3, "carlo": 3, "interv": 3, "538": 3, "669": 3, "underli": [3, 6], "nindex": 3, "tpage": 3, "nconsolid": 3, "n29": 3, "n30": 3, "sheet": 3, "n31": 3, "n32": 3, "n33": 3, "nnote": 3, "n34": 3, "nreport": 3, "n48": 3, "nall": 3, "omit": [3, 6], "submiss": 3, "nyear": 3, "n2023": 3, "n2022": 3, "nnet": 3, "t294": 3, "866": 3, "t298": 3, "085": 3, "t316": 3, "199": 3, "t96": 3, "ncost": 3, "t185": 3, "233": 3, "t189": 3, "282": 3, "471": 3, "119": 3, "855": 3, "t22": 3, "075": 3, "352": 3, "t214": 3, "137": 3, "t223": 3, "546": 3, "t123": 3, "216": 3, "t119": 3, "437": 3, "t269": 3, "565": 3, "334": 3, "485": 3, "736": 3, "103": 3, "t93": 3, "995": 3, "t99": 3, "nearn": 3, "nbasic": 3, "ndilut": 3, "08": [3, 6], "343": 3, "783": 3, "744": 3, "215": 3, "963": 3, "095": 3, "812": 3, "547": 3, "325": 3, "819": 3, "nsee": 3, "translat": 3, "t395": 3, "765": 3, "511": 3, "unreal": 3, "832": 3, "t323": 3, "212": 3, "nadjust": 3, "337": 3, "717": 3, "394": 3, "138": 3, "850": 3, "563": 3, "104": 3, "t204": 3, "t253": 3, "816": 3, "899": 3, "272": 3, "t98": 3, "016": 3, "652": 3, "t88": 3, "531": 3, "nasset": 3, "ncurrent": 3, "ncash": 3, "943": 3, "965": 3, "228": 3, "590": 3, "naccount": 3, "410": 3, "508": 3, "nvendor": 3, "t32": 3, "833": 3, "477": 3, "ninventori": 3, "286": 3, "331": 3, "287": 3, "695": 3, "t152": 3, "987": 3, "t143": 3, "566": 3, "t91": 3, "479": 3, "544": 3, "t45": 3, "680": 3, "715": 3, "834": 3, "t64": 3, "758": 3, "t211": 3, "993": 3, "t209": 3, "017": 3, "t364": 3, "980": 3, "t352": 3, "nliabil": 3, "t68": 3, "960": 3, "t62": 3, "611": 3, "304": 3, "t58": 3, "829": 3, "ndefer": 3, "249": 3, "061": 3, "ncommerci": 3, "967": 3, "985": 3, "t10": 3, "912": 3, "822": 3, "t176": 3, "392": 3, "t145": 3, "308": 3, "750": 3, "888": 3, "t49": 3, "848": 3, "638": 3, "t308": 3, "030": 3, "t290": 3, "ncommit": 3, "nsharehold": 3, "400": 3, "116": 3, "786": 3, "550": 3, "n83": 3, "276": 3, "naccumul": 3, "deficit": 3, "154": 3, "214": 3, "172": 3, "452": 3, "950": 3, "146": 3, "t50": 3, "672": 3, "t63": 3, "090": 3, "nbegin": 3, "849": 3, "365": 3, "423": 3, "346": 3, "175": 3, "withheld": 3, "settlement": 3, "521": 3, "971": 3, "t12": 3, "034": 3, "t11": 3, "nend": 3, "t83": 3, "nretain": 3, "068": 3, "562": 3, "ndividend": 3, "218": 3, "793": 3, "612": 3, "099": 3, "454": 3, "846": 3, "77": 3, "046": 3, "186": 3, "109": 3, "t163": 3, "rsu": 3, "t0": 3, "98": 3, "94": 3, "32": 3, "737": 3, "929": 3, "ndepreci": 3, "445": 3, "519": 3, "688": 3, "038": 3, "266": 3, "227": 3, "006": 3, "788": 3, "356": 3, "271": 3, "520": 3, "618": 3, "484": 3, "731": 3, "684": 3, "499": 3, "020": 3, "889": 3, "448": 3, "552": 3, "031": 3, "t118": 3, "254": 3, "t110": 3, "543": 3, "t122": 3, "151": 3, "48": 3, "656": 3, "513": 3, "76": 3, "923": 3, "nproce": 3, "211": 3, "686": 3, "917": 3, "135": 3, "828": 3, "446": 3, "447": 3, "959": 3, "708": 3, "086": 3, "935": 3, "705": 3, "354": 3, "nfinanc": 3, "441": 3, "431": 3, "223": 3, "234": [3, 5], "025": 3, "841": 3, "nrepurchas": 3, "949": 3, "89": 3, "402": 3, "465": 3, "nrepay": 3, "958": 3, "repay": 3, "978": 3, "955": 3, "361": 3, "581": 3, "160": 3, "121": 3, "983": 3, "488": 3, "794": 3, "760": 3, "nsupplement": 3, "102": 3, "t18": 3, "679": 3, "573": 3, "33": 3, "nbasi": 3, "prior": 3, "reclassifi": 3, "nrevenu": 3, "remit": [3, 5], "straight": 3, "vest": 3, "treat": [3, 5], "sold": 3, "nderiv": 3, "nonleas": 3, "34": 3, "entitl": 3, "commenc": 3, "deliveri": 3, "stand": 3, "ssp": 3, "icloud": 3, "siri": 3, "discount": 3, "undeliv": 3, "unbil": 3, "n26": 3, "n37": 3, "proport": 3, "moder": [3, 5], "64": 3, "dilut": 3, "nnumer": 3, "ndenomin": 3, "nweight": 3, "312": 3, "316": 3, "856": 3, "antidilut": 3, "tunreal": 3, "ngain": 3, "tfair": 3, "nvalu": 3, "tcash": 3, "nequival": 3, "tcurrent": 3, "tnon": 3, "t27": 3, "nlevel": 3, "nmonei": 3, "t778": 3, "nmutual": 3, "n515": 3, "t105": 3, "t617": 3, "nsubtot": 3, "293": 3, "395": 3, "nu": 3, "treasuri": 3, "516": 3, "t212": 3, "087": 3, "380": 3, "agenc": [3, 5], "159": 3, "t703": 3, "t17": 3, "568": 3, "158": 3, "810": 3, "ncertif": 3, "deposit": 3, "t873": 3, "t387": 3, "t478": 3, "066": 3, "ncorpor": 3, "t65": 3, "622": 3, "t270": 3, "953": 3, "939": 3, "027": 3, "t47": 3, "886": 3, "nmunicip": 3, "t412": 3, "t405": 3, "t190": 3, "nmortgag": 3, "595": 3, "t175": 3, "403": 3, "t23": 3, "367": 3, "278": 3, "t132": 3, "t583": 3, "635": 3, "t128": 3, "056": 3, "966": 3, "t34": 3, "t160": 3, "t688": 3, "650": 3, "36": 3, "359": 3, "t481": 3, "n442": 3, "t428": 3, "t923": 3, "t909": 3, "406": 3, "114": 3, "468": 3, "136": 3, "t271": 3, "533": 3, "048": 3, "491": 3, "332": 3, "t320": 3, "t608": 3, "t76": 3, "840": 3, "956": 3, "890": 3, "t20": 3, "627": 3, "243": 3, "t628": 3, "t602": 3, "t192": 3, "t410": 3, "735": 3, "636": 3, "t344": 3, "t144": 3, "470": 3, "657": 3, "831": 3, "125": 3, "162": 3, "t173": 3, "752": 3, "corrobor": 3, "mortgag": 3, "classifi": [3, 5], "37": 3, "cross": [3, 5], "swap": 3, "remeasur": 3, "notion": 3, "069": 3, "730": 3, "575": 3, "493": 3, "t104": 3, "777": 3, "nhedg": 3, "433": 3, "505": 3, "247": 3, "ntrade": 3, "41": 3, "44": 3, "depreci": 3, "nland": 3, "690": 3, "nmachineri": 3, "t80": 3, "205": 3, "314": 3, "nleasehold": 3, "839": 3, "599": 3, "73": 3, "70": 3, "884": 3, "852": 3, "t55": 3, "906": 3, "601": 3, "703": 3, "010": 3, "457": 3, "634": 3, "391": 3, "neuropean": 3, "opinion": [3, 5], "1991": 3, "2007": 3, "irish": 3, "branch": 3, "2003": 3, "2014": 3, "2015": 3, "minist": 3, "juli": [3, 5], "annul": 3, "ecj": 3, "hear": 3, "asid": 3, "confirm": 3, "unrecogn": 3, "nfeder": 3, "571": 3, "080": 3, "644": 3, "265": 3, "801": 3, "726": 3, "570": 3, "298": 3, "49": 3, "t84": 3, "428": 3, "603": 3, "483": 3, "t347": 3, "t669": 3, "076": 3, "830": 3, "419": 3, "072": 3, "pretax": 3, "72": 3, "71": 3, "ncomput": 3, "885": 3, "012": 3, "124": 3, "518": 3, "nimpact": 3, "246": 3, "311": 3, "366": 3, "397": 3, "nexcess": 3, "893": 3, "871": 3, "192": 3, "739": 3, "ntax": 3, "carryforward": 3, "302": 3, "naccru": 3, "413": 3, "421": 3, "nunreal": 3, "173": 3, "168": 3, "873": 3, "743": 3, "nless": 3, "374": 3, "007": 3, "369": 3, "551": 3, "998": 3, "nright": 3, "179": 3, "nminimum": 3, "674": 3, "940": 3, "t511": 3, "t455": 3, "t490": 3, "805": 3, "202": 3, "indefinit": 3, "temporari": 3, "727": 3, "044": 3, "284": 3, "ndecreas": 3, "386": 3, "463": 3, "982": 3, "542": 3, "936": 3, "070": 3, "expir": 3, "statut": 3, "229": 3, "494": 3, "closur": 3, "intercompani": 3, "exceed": [3, 5], "multiyear": 3, "exercis": 3, "noncash": 3, "rou": 3, "tfinanci": 3, "t2024": 3, "tother": 3, "661": 3, "tproperti": 3, "015": 3, "303": 3, "676": 3, "t165": 3, "t752": 3, "t859": 3, "430": 3, "842": [3, 5], "tfinanc": 3, "n2025": 3, "820": 3, "t171": 3, "991": 3, "n2026": 3, "914": 3, "n2027": 3, "t59": 3, "733": 3, "n2028": 3, "360": 3, "t38": 3, "398": 3, "n2029": 3, "187": 3, "nthereaft": 3, "t837": 3, "undiscount": 3, "790": 3, "imput": 3, "376": 3, "534": 3, "t896": 3, "borrow": 3, "proce": 3, "nine": [3, 5], "nmatur": 3, "333": 3, "264": 3, "948": 3, "645": 3, "309": 3, "arrear": 3, "namount": 3, "n2013": 3, "nfix": 3, "2062": 3, "t97": 3, "341": 3, "03": 3, "65": 3, "t106": 3, "572": 3, "n97": 3, "nunamort": 3, "premium": 3, "321": 3, "358": 3, "113": 3, "662": 3, "930": 3, "342": 3, "800": 3, "180": 3, "88": 3, "ndure": 3, "425": 3, "426": 3, "372": 3, "589": 3, "055": 3, "appreci": 3, "four": 3, "holder": 3, "n2014": 3, "bonu": 3, "nrestrict": 3, "nnumber": 3, "nrsu": 3, "ngrant": 3, "naggreg": 3, "nfair": 3, "nbalanc": 3, "t240": 3, "427": 3, "t75": 3, "t150": 3, "861": 3, "501": 3, "768": 3, "87": 3, "101": 3, "878": 3, "144": 3, "t127": 3, "t135": 3, "91": 3, "456": 3, "78": 3, "59": 3, "t140": 3, "326": 3, "t158": 3, "204": 3, "350": 3, "002": [3, 4], "nuncondit": 3, "uncondit": 3, "206": 3, "440": 3, "156": 3, "t633": 3, "t670": 3, "226": 3, "45": 3, "nconting": 3, "accrual": 3, "nconcentr": 3, "attribut": [3, 5, 6], "46": 3, "t67": 3, "098": 3, "082": 3, "062": 3, "569": 3, "895": 3, "458": 3, "207": 3, "nonrecur": 3, "t142": 3, "196": 3, "t138": 3, "t147": 3, "859": 3, "nchina": 3, "n66": 3, "t181": 3, "887": 3, "t172": 3, "269": 3, "nlong": 3, "664": 3, "797": 3, "778": 3, "219": 3, "47": 3, "nopinion": 3, "nwe": 3, "fairli": 3, "pcaob": 3, "sponsor": 3, "treadwai": 3, "2013": 3, "unqualifi": 3, "thereon": 3, "nthese": 3, "misstat": 3, "fraud": 3, "ndescript": 3, "naudit": 3, "nhow": 3, "nmatter": 3, "qualifi": 3, "letter": 3, "advisor": 3, "ernst": 3, "llp": 3, "auditor": 3, "2009": 3, "nsan": 3, "jose": 3, "nnovemb": 3, "coso": 3, "nour": 3, "ndefinit": 3, "mainten": 3, "disposit": 3, "receipt": 3, "nevalu": 3, "nbase": 3, "13a": 3, "15d": 3, "ninher": 3, "met": 3, "appear": [3, 6], "paragraph": 3, "51": [3, 6], "ninsid": 3, "deirdr": 3, "brien": 3, "vice": 3, "presid": 3, "affirm": 3, "april": 3, "withhold": 3, "remitt": 3, "mr": 3, "copi": [3, 4], "solicit": 3, "id": 3, "00042": 3, "nincorpor": 3, "texhibit": 3, "descript": [3, 6], "tform": 3, "tfile": 3, "nrestat": 3, "namend": 3, "bylaw": 3, "nindentur": 3, "york": [3, 6], "mellon": 3, "truste": 3, "noffic": 3, "certif": 3, "2018": 3, "85": 3, "2043": 3, "05": 3, "2044": 3, "februari": 3, "55": 3, "2045": 3, "900": 3, "700": 3, "60": 3, "250": 3, "2036": 3, "2046": 3, "450": 3, "2047": 3, "2049": 3, "2030": 3, "2050": 3, "2060": 3, "2028": 3, "2041": 3, "2051": 3, "2061": 3, "2032": 3, "2052": 3, "54": 3, "2033": 3, "2053": 3, "ceo": 3, "n12": 3, "nsubsidiari": 3, "n23": 3, "nconsent": 3, "n24": 3, "npower": 3, "signatur": 3, "nrule": 3, "nsection": 3, "1350": 3, "n101": 3, "ninlin": 3, "xbrl": 3, "n104": 3, "inlin": 3, "compensatori": 3, "herewith": 3, "furnish": 3, "herebi": 3, "undertak": 3, "56": 3, "nsignatur": 3, "npursuant": 3, "duli": 3, "undersign": 3, "thereunto": 3, "ndate": 3, "nby": 3, "luca": [3, 6], "maestri": 3, "nluca": 3, "nsenior": 3, "nchief": 3, "nknow": 3, "THESE": 3, "appoint": 3, "cook": 3, "jointli": 3, "hi": [3, 6], "her": 3, "substitut": 3, "him": 3, "thereto": 3, "therewith": 3, "ratifi": 3, "done": [3, 6], "virtu": 3, "hereof": 3, "nname": 3, "ttitl": 3, "tdate": 3, "tchief": 3, "tnovemb": 3, "ntimothi": 3, "tsenior": 3, "kondo": 3, "nchri": 3, "wanda": 3, "austin": 3, "nwanda": 3, "gorski": 3, "tdirector": 3, "nalex": 3, "andrea": [3, 5], "jung": 3, "nandrea": 3, "arthur": 3, "levinson": 3, "narthur": 3, "monica": 3, "lozano": 3, "nmonica": 3, "ronald": 3, "sugar": 3, "nronald": 3, "susan": 3, "wagner": 3, "nsusan": 3, "57": 3, "turbo": [3, 4, 6], "invdestacksmeticsisdict": 3, "setispect": 3, "20cyan": 3, "evaluationseld": 3, "anvis": 3, "droitent": 3, "discernminerv": 3, "versbobprefvers": 3, "vo\u8be5": 3, "option\u548c": 3, "meio": 3, "\u0432\u0440\u0435\u043ccisco": 3, "dellaischenpoihscap": 3, "geme": 3, "gettim": 3, "unscal": 3, "vocabulari": [3, 6], "closer": 3, "sharpen": 3, "uniform": 3, "raschka": 3, "repetit": [3, 4, 6], "radic": 3, "grappl": 3, "safer": [3, 5], "fascin": 3, "spontan": 3, "aren": 3, "linear": 3, "absent": [3, 5], "coax": 3, "journei": 3, "suddenli": 3, "manifest": 3, "deliber": [3, 5], "contend": 3, "70b": 3, "rethink": 3, "tutor": 3, "children": [3, 5], "verifi": [3, 6], "predefin": [3, 6], "weren": 3, "kind": 3, "usual": 3, "resist": 3, "quantif": 3, "contamin": [3, 5], "massiv": [3, 5], "truli": 3, "unseen": 3, "longitudin": 3, "mostli": [3, 6], "versu": 3, "latter": 3, "tailor": 3, "great": [3, 6], "cognit": 3, "misinform": [3, 5], "citat": 3, "tempor": 3, "scientif": 3, "disclaim": 3, "referr": 3, "incorrect": [3, 5], "demograph": [3, 5], "stereotyp": [3, 5], "societ": [3, 5], "pii": 3, "anonym": 3, "leakag": [3, 5], "carryov": 3, "multi": [3, 6], "mathemat": 3, "fallaci": 3, "causal": 3, "think": [3, 5], "idiom": 3, "sarcasm": 3, "terminologi": 3, "lingual": 3, "misunderstand": 3, "syntax": 3, "scan": 3, "compat": [3, 6], "scalabl": [3, 4, 5], "overconfid": 3, "clariti": [3, 4, 6], "audienc": 3, "densiti": 3, "satisfact": [3, 6], "misus": [3, 5], "moral": 3, "co2": 3, "energi": 3, "consumpt": 3, "server": [3, 6], "imag": 3, "audio": 3, "etc": [3, 6], "truth": [3, 5, 6], "layer": [3, 4, 6], "palm": 3, "easi": [3, 4], "synthet": [3, 5, 6], "timeout": 3, "variat": 3, "inter": 3, "rater": 3, "ti": 3, "tier": [3, 5], "holist": 3, "fast": [3, 5, 6], "experiment": [3, 6], "vi": 3, "categor": [3, 6], "intrins": 3, "extrins": 3, "sequenc": [3, 6], "perplex": 3, "downstream": [3, 6], "synthesi": 3, "discret": 3, "prefix": [3, 5], "roug": 3, "bleu": 3, "bilingu": 3, "understudi": 3, "overlap": [3, 4], "favor": [3, 6], "breviti": 3, "insensit": 3, "semant": [3, 4], "orient": 3, "gist": 3, "meteor": 3, "synonym": 3, "stem": [3, 6], "paraphras": 3, "alongsid": [3, 5], "computation": [3, 4], "cider": 3, "consensu": 3, "tf": 3, "idf": 3, "caption": 3, "reliant": 3, "corpu": 3, "ter": 3, "edit": [3, 5], "hypothesi": 3, "penal": 3, "bertscor": 3, "embed": [3, 4], "bert": 3, "spice": 3, "proposit": 3, "scene": 3, "pure": 3, "analyst": [3, 4], "rouge_1": 3, "rouge_2": 3, "ideal": [3, 6], "cheaper": 3, "evaluate_summari": 3, "unigram": 3, "bigram": 3, "absl": 3, "py": 3, "rouge_scor": 3, "generated_summari": 3, "reference_summari": 3, "google_bleu": 3, "bleu_scor": 3, "rouge1": 3, "rouge2": 3, "arbitrari": 3, "chosen": 3, "sentence1": 3, "cat": 3, "sat": 3, "mat": 3, "sentence2": 3, "ate": 3, "3333333333333333": 3, "7272727272727272": 3, "4444444444444445": 3, "generate_summari": 3, "summir": 3, "liner": 3, "excerpt": 3, "evaluate_summary_model": 3, "model_benchmark": 3, "models_test": 3, "benchmark_summari": 3, "model_summari": 3, "evaluation_result": 3, "analyz": [3, 4, 5, 6], "statu": 3, "concis": 3, "element": [3, 5, 6], "verbos": 3, "peripher": 3, "quit": [3, 6], "miss": 3, "convei": [3, 4], "breadth": 3, "Of": 3, "vibe": 3, "visualize_prompt_comparison": 3, "matplotlib": 3, "radar": 3, "radar_plot": 3, "tmp": 3, "ipykernel_1652501": 3, "940173201": 3, "userwarn": 3, "figurecanvasagg": 3, "largest": 3, "deviat": [3, 6], "granular": [3, 4], "likert": 3, "pairwis": 3, "ensembl": 3, "repeatedli": 3, "fluenci": 3, "refin": 3, "narr": 3, "notabl": [3, 6], "henc": 3, "integ": 3, "rubric": 3, "hollist": 3, "judgeevalu": 3, "grammar": [3, 6], "evaluate_with_llm": 3, "criterion": 3, "judge_model": 3, "candidate_summari": 3, "grammat": 3, "y": [3, 5, 6], "z": 3, "w": [3, 4], "benchmark_model": 3, "test_model": 3, "input_text": [3, 4], "trillion": [3, 6], "evals_list": 3, "1775618912": 3, "variant": 3, "slightli": 3, "drift": 3, "lowest": 3, "degrad": [3, 6], "firstli": 3, "overhead": 3, "prefer": [3, 5, 6], "egocentr": 3, "tight": 3, "aproach": 3, "workflow": [3, 6], "aplic": 3, "clearli": [3, 5, 6], "earlier": 3, "depict": [3, 6], "correl": 3, "multilingu": 3, "golden": 3, "languang": 3, "arena": 3, "blind": 3, "randomli": 3, "loop": 3, "customiz": 3, "irrelev": 3, "unhelp": [3, 5], "occasion": 3, "rare": 3, "perfectli": 3, "cater": 3, "critiqu": [3, 5], "elo": 3, "thought": [3, 6], "exam": 3, "probe": [3, 5], "certifi": 3, "began": 3, "glue": 3, "entail": 3, "baselin": [3, 5], "superglu": 3, "deeper": [3, 4], "successor": 3, "grew": 3, "big": 3, "bench": 3, "srivastava": 3, "arithmet": 3, "truthfulqa": 3, "multitask": 3, "hendryck": 3, "multidisciplinari": 3, "stanford": 3, "helm": 3, "multidimension": 3, "surround": [3, 6], "humanev": 3, "lmsy": 3, "brought": 3, "dialogu": 3, "chiang": 3, "alpacaev": 3, "duboi": 3, "mt": 3, "Their": [3, 6], "render": 3, "crowdsourc": 3, "livebench": 3, "white": [3, 5], "resili": 3, "meaningfulli": 3, "zebralog": 3, "grid": 3, "puzzl": 3, "brailsford": 3, "1999": 3, "lsat": 3, "hous": 3, "clue": 3, "strateg": [3, 5, 6], "deduct": 3, "arriv": 3, "programmat": [3, 6], "2x2": 3, "6x6": 3, "reductio": 3, "ad": [3, 6], "absurdum": 3, "sonnet": [3, 4], "hard": 3, "10b": 3, "counterfactu": 3, "came": 3, "arc": 3, "prize": 3, "chollet": 3, "mike": [3, 5], "knoop": 3, "founder": 3, "zapier": 3, "fran\u00e7oi": 3, "creator": 3, "agi": 3, "kera": 3, "genuin": 3, "possess": 3, "elementari": 3, "novelti": 3, "someth": 3, "wouldn": 3, "interpol": 3, "synthes": 3, "fly": 3, "brute": 3, "pixel": 3, "perfect": 3, "unbeaten": 3, "win": 3, "poorli": 3, "recombin": 3, "spur": [3, 5], "takeawai": 3, "fourrier": 3, "bespok": 3, "sdk": 3, "autoregress": 3, "sub": 3, "liter": 3, "disturb": 3, "zero": [3, 5, 6], "varianc": 3, "yt": 3, "ut": 3, "suppos": [3, 6], "ol": 3, "heteroscedast": 3, "regress": 3, "wish": 3, "lag": [3, 5], "bivari": 3, "evaluation_track": 3, "evaluationtrack": 3, "model_config": 3, "basemodelconfig": 3, "parallelismmanag": 3, "pipelineparamet": 3, "envconfig": 3, "is_accelerate_avail": 3, "datetim": 3, "timedelta": 3, "initprocessgroupkwarg": 3, "create_evaluation_pipelin": 3, "cache_dir": 3, "pretrain": 3, "float16": 3, "max_sampl": 3, "kwargs_handl": 3, "3000": 3, "save_detail": 3, "pipeline_param": 3, "launcher_typ": 3, "env_config": 3, "override_batch_s": 3, "use_chat_templ": 3, "trust_remote_cod": 3, "pipeline_paramet": 3, "schemat": [3, 4], "vllm": [3, 6], "tgi": 3, "storag": [3, 5], "num_few_shot": 3, "vertic": 3, "bar": 3, "bigbench": 3, "winogrand": 3, "hellaswag": 3, "nlp": 3, "save_and_push_result": 3, "show_result": 3, "model_arg": 3, "send": [3, 6], "serverless": 3, "inference_server_address": 3, "inference_server_auth": 3, "model_id": 3, "null": 3, "bash": 3, "command": 3, "model_config_path": 3, "endpoint_model": 3, "llama3": [3, 4], "qwen2": [3, 6], "smollm2": 3, "3b": 3, "alibaba": [3, 6], "5b": [3, 6], "hui": 3, "allal": 3, "cluster": 3, "noteworthi": 3, "grain": [3, 6], "salt": [3, 6], "exponenti": 3, "modular": 3, "offici": 3, "revisit": 3, "trace": 3, "langchain_tracing_v2": 3, "langchain_api_kei": 3, "hf_evalu": 3, "langsmith_evalu": 3, "ls_client": 3, "dataset_nam": 3, "create_dataset": 3, "create_exampl": 3, "dataset_id": 3, "calculate_scor": 3, "reference_output": 3, "oai_client": 3, "xp_model_nam": 3, "lastli": 3, "run_evalu": 3, "And": 3, "upload_result": 3, "experiment_prefix": 3, "num_repetit": 3, "386a3620": 3, "9e1cc3cb": 3, "9d6a": 3, "4356": 3, "ab34": 3, "138e0abe8be4": 3, "8741976e": 3, "5268": 3, "4b75": 3, "949f": 3, "99477dde5d64": 3, "selectedsess": 3, "b831dc1e": 3, "90bc": 3, "4ed8": 3, "8080": 3, "fb42444724d6": 3, "4it": 3, "latest": [3, 4, 6], "tobia": 3, "evaluate_modul": 3, "6fc70b7be0088120a372dfdd5d320b39b8bb3630cb8029b193941d9376e86bb0": 3, "tue": 3, "nov": 3, "couldn": 3, "5it": 3, "5053784e": 3, "64445871": 3, "a53c": 3, "44b1": 3, "a422": 3, "4f49b2f9656f": 3, "69": 3, "4b29f3c9": 3, "9ef7e39a": 3, "2add": 3, "410c": 3, "89f8": 3, "9f1a8b198cf1": 3, "61": 3, "insert": 3, "combined_df": 3, "concat": 3, "ignore_index": 3, "execution_tim": 3, "example_id": 3, "333333": 3, "224388": 3, "feb10f92": 3, "3167": 3, "41f3": 3, "bb1c": 3, "d271153a31a8": 3, "5b196b22": 3, "9f4c": 3, "489c": 3, "b020": 3, "7823208b42d6": 3, "348101": 3, "722464": 3, "c310f159": 3, "064a": 3, "4035": 3, "97c3": 3, "a25bbf43abc2": 3, "386076": 3, "704104": 3, "f7f24899": 3, "dd50": 3, "409e": 3, "93cc": 3, "6fb1622b60bf": 3, "443038": 3, "725059": 3, "242856d6": 3, "efb5": 3, "4101": 3, "b1cf": 3, "5805532838ac": 3, "373418": 3, "795302": 3, "ce975169": 3, "a0ab": 3, "40ce": 3, "8e32": 3, "efa28d06079d": 3, "stat": 3, "groupbi": 3, "agg": 3, "sort": 3, "sort_valu": 3, "figur": [3, 6], "subplot": 3, "pyplot": 3, "plt": 3, "numpi": 3, "np": 3, "ax1": 3, "ax2": 3, "figsiz": 3, "2ecc71": 3, "3498db": 3, "e74c3c": 3, "bleu_mean": 3, "bleu_std": 3, "enumer": [3, 4], "errorbar": 3, "yerr": 3, "fmt": 3, "markers": 3, "capsiz": 3, "set_ylabel": 3, "set_titl": 3, "set_xtick": 3, "set_xticklabel": 3, "rotat": 3, "set_ylim": 3, "bottom": 3, "legend": 3, "exec_mean": 3, "exec_std": 3, "tight_layout": 3, "ndetail": 3, "4038": 3, "0453": 3, "7815": 3, "0433": 3, "3768": 3, "0424": 3, "8343": 3, "2208": 3, "3519": 3, "0775": 3, "9122": 3, "1482": 3, "377": 3, "042": 3, "078": 3, "slower": 3, "04": [3, 4], "latenc": [3, 4], "speed": 3, "interestingli": 3, "decoupl": 3, "reload": 3, "facilit": [3, 5], "promptfooconfig": 3, "model_comparison": 3, "pretti": 3, "dump": 3, "default_flow_styl": 3, "sort_kei": 3, "prompt1": 3, "defaulttest": 3, "1000m": 3, "millisecond": 3, "eval_data": 3, "latency_m": 3, "totallatencym": 3, "token_usag": 3, "tokenusag": 3, "assert_pass": 3, "assertpasscount": 3, "assert_fail": 3, "assertfailcount": 3, "prompt_token": 3, "num_request": 3, "numrequest": 3, "2463": 3, "000035": 3, "3773": 3, "004620": 3, "1669": 3, "000091": 3, "1669m": 3, "highest": 3, "3773m": 3, "00462": 3, "promptfool": 3, "manual": [3, 5], "redefin": 3, "prompt_comparison": 3, "prompt2": 3, "prompt3": 3, "prompt_fil": 3, "prompt_cont": 3, "BE": 3, "again": 3, "prompt_id": 3, "promptid": 3, "gradingresult": 3, "df_raw": 3, "reset_index": 3, "eas": [3, 5], "seamless": 3, "hf": 3, "plain": 3, "vanilla": 3, "defi": 3, "accustom": 3, "legaci": 3, "unsustain": 3, "prd": 3, "cultiv": [3, 5], "organiz": 3, "stagnat": 3, "alb": 3, "loubna": 3, "anton": 3, "lozhkov": 3, "bakouch": 3, "gabriel": [3, 5], "mart\u00edn": 3, "bl\u00e1zquez": 3, "lewi": 3, "tunstal": 3, "agust\u00edn": 3, "piquer": 3, "andr": 3, "marafioti": 3, "cyril": 3, "zakka": 3, "leandro": 3, "von": 3, "werra": 3, "wolf": 3, "are24": 3, "judgearena": 3, "bps99": 3, "salli": 3, "pott": 3, "barbara": 3, "557": 3, "sciencedirect": 3, "s0377221798003646": 3, "doi": [3, 5, 6], "1016": 3, "s0377": 3, "2217": 3, "00364": 3, "ctj": 3, "jerri": [3, 5], "tworek": [3, 5], "heewoo": [3, 5], "jun": [3, 5], "qime": [3, 5], "henriqu": [3, 5], "pond": [3, 5], "de": [3, 5], "oliveira": [3, 5], "pinto": [3, 5], "harri": [3, 5], "yuri": 3, "burda": 3, "greg": [3, 5], "brockman": [3, 5], "raul": [3, 5], "puri": [3, 5], "gretchen": [3, 5], "krueger": [3, 5], "petrov": [3, 5], "heidi": 3, "khlaaf": 3, "girish": [3, 5], "sastri": [3, 5], "brook": [3, 5], "chan": [3, 5], "grai": [3, 5], "ryder": [3, 5], "mikhail": [3, 5], "pavlov": [3, 5], "alethea": [3, 5], "lukasz": 3, "kaiser": [3, 5], "mohammad": [3, 5], "bavarian": [3, 5], "clemen": [3, 5], "winter": [3, 5], "philipp": 3, "tillet": [3, 5], "felip": [3, 5], "petroski": [3, 5], "dave": [3, 5], "cum": [3, 5], "matthia": 3, "plappert": 3, "fotio": 3, "chantzi": [3, 5], "barn": 3, "ariel": 3, "herbert": 3, "voss": [3, 5], "hebgen": 3, "guss": 3, "nichol": 3, "paino": [3, 5], "nikola": [3, 5], "tezak": [3, 5], "jie": [3, 5], "babuschkin": [3, 5], "suchir": [3, 5], "balaji": [3, 5], "shantanu": [3, 5], "jain": [3, 5], "saunder": 3, "hess": [3, 5], "carr": 3, "josh": [3, 5], "achiam": [3, 5], "vedant": 3, "misra": 3, "evan": [3, 5], "morikawa": [3, 5], "matthew": 3, "knight": [3, 5], "mile": [3, 5], "brundag": [3, 5], "mira": [3, 5], "murati": [3, 5], "kati": [3, 5], "mayer": [3, 5], "bob": [3, 5, 6], "mcgrew": [3, 5], "ilya": [3, 5], "sutskev": [3, 5], "wojciech": [3, 5], "zaremba": [3, 5], "2107": 3, "03374": 3, "cz": 3, "lianmin": 3, "ying": 3, "sheng": 3, "anastasio": 3, "angelopoulo": 3, "tianl": 3, "dacheng": 3, "banghua": 3, "jordan": [3, 5], "gonzalez": 3, "ion": 3, "stoica": 3, "04132": 3, "cho24a": 3, "francoi": 3, "arcpriz": 3, "cho24b": 3, "dglh24": 3, "yann": 3, "bal\u00e1z": 3, "galambosi": 3, "tatsunori": 3, "hashimoto": 3, "debia": 3, "04475": 3, "fac24a": 3, "wiki": [3, 6], "fac24b": 3, "fac24c": 3, "model_doc": 3, "fac24d": 3, "cookbook": [3, 5], "llm_judg": 3, "fac24f": 3, "fhwt23": 3, "cl\u00e9mentin": 3, "nathan": 3, "habib": 3, "hbb": 3, "collin": 3, "burn": 3, "steven": [3, 5], "basart": 3, "zou": 3, "manta": 3, "mazeika": 3, "song": [3, 5], "steinhardt": 3, "03300": 3, "hbd": 3, "du": 3, "maxwel": 3, "forb": 3, "yejin": 3, "choi": 3, "curiou": 3, "neural": [3, 6], "degener": 3, "1904": 3, "09751": 3, "hyc": 3, "binyuan": 3, "zeyu": 3, "cui": 3, "jiaxi": 3, "dayiheng": 3, "lei": [3, 5], "tianyu": 3, "jiajun": 3, "bowen": [3, 5], "kai": [3, 5], "dang": 3, "coder": 3, "preprint": [3, 6], "2409": [3, 5], "12186": 3, "lx": 3, "zhen": 3, "xiaohan": 3, "jia": 3, "yuxuan": 3, "lai": 3, "chongyang": 3, "shuai": 3, "ma": [3, 5], "nlg": 3, "07103": 3, "lbl": 3, "bommasani": 3, "toni": 3, "dimitri": 3, "tsipra": 3, "dilara": 3, "soylu": 3, "michihiro": 3, "yasunaga": 3, "yian": 3, "deepak": 3, "narayanan": 3, "yuhuai": 3, "benjamin": [3, 5], "newman": 3, "binhang": 3, "bobbi": 3, "ce": 3, "christian": [3, 5], "cosgrov": 3, "r\u00e9": 3, "acosta": 3, "nava": [3, 5], "drew": 3, "hudson": 3, "zelikman": 3, "esin": 3, "durmu": 3, "faisal": 3, "ladhak": 3, "frieda": 3, "rong": 3, "hongyu": 3, "ren": 3, "huaxiu": 3, "yao": [3, 5], "jue": 3, "keshav": 3, "santhanam": 3, "laurel": 3, "lucia": 3, "mert": 3, "yuksekgonul": 3, "mirac": 3, "suzgun": 3, "guha": 3, "niladri": 3, "chatterji": 3, "omar": 3, "khattab": 3, "henderson": 3, "qian": [3, 5], "chi": [3, 6], "sang": 3, "shibani": [3, 5], "santurkar": [3, 5], "surya": 3, "icard": 3, "tianyi": 3, "vishrav": 3, "chaudhari": 3, "xuechen": 3, "yuhui": 3, "yuta": 3, "koreeda": 3, "2211": 3, "09110": 3, "lbc24": 3, "ronan": 3, "bra": 3, "allenai": 3, "lhe22": 3, "stephani": [3, 5], "owain": 3, "mimic": 3, "falsehood": 3, "2109": 3, "07958": 3, "pro24": 3, "dev": 3, "ras24": 3, "sebastian": 3, "scratch": 3, "1633437166": 3, "srr": 3, "aarohi": 3, "abhinav": 3, "rastogi": 3, "abhishek": 3, "rao": 3, "abu": 3, "awal": 3, "shoeb": 3, "abubakar": 3, "abid": 3, "adam": [3, 5], "fisch": 3, "santoro": 3, "aditya": [3, 5], "gupta": 3, "adri\u00e0": 3, "garriga": 3, "alonso": 3, "agnieszka": 3, "kluska": 3, "aitor": 3, "lewkowycz": 3, "akshat": 3, "warstadt": 3, "alexand": [3, 5, 6], "kocurek": 3, "ali": [3, 5], "safaya": 3, "tazarv": 3, "aman": 3, "hussain": 3, "dsouza": 3, "ambros": 3, "slone": 3, "ameet": 3, "rahan": 3, "anantharaman": 3, "iyer": 3, "ander": 3, "andreassen": 3, "madotto": 3, "santilli": 3, "stuhlm\u00fcller": 3, "la": 3, "lampinen": 3, "angelica": 3, "anh": 3, "vuong": 3, "animesh": 3, "gottardi": 3, "antonio": 3, "norelli": 3, "anu": 3, "venkatesh": 3, "arash": 3, "gholamidavoodi": 3, "arfa": 3, "tabassum": 3, "arul": 3, "menez": 3, "arun": [3, 5], "kirubarajan": 3, "asher": 3, "mullokandov": 3, "ashish": 3, "sabharw": 3, "herrick": 3, "avia": 3, "efrat": 3, "aykut": 3, "erdem": 3, "ayla": 3, "karaka\u015f": 3, "bao": [3, 5], "loe": 3, "barret": [3, 5], "zoph": [3, 5], "bart\u0142omiej": 3, "bojanowski": 3, "batuhan": 3, "\u00f6zyurt": 3, "behnam": 3, "hedayatnia": 3, "neyshabur": 3, "inden": 3, "benno": 3, "stein": 3, "berk": 3, "ekmekci": 3, "blake": 3, "howald": 3, "bryan": 3, "orinion": 3, "diao": 3, "dour": 3, "stinson": 3, "cedrick": 3, "argueta": 3, "c\u00e9sar": 3, "ferri": 3, "ram\u00edrez": 3, "chandan": 3, "charl": 3, "rathkopf": 3, "chenlin": 3, "meng": 3, "chitta": 3, "baral": 3, "chiyu": 3, "callison": 3, "burch": 3, "wait": 3, "voigt": 3, "cindi": 3, "ramirez": 3, "clara": 3, "rivera": 3, "clemencia": 3, "siro": 3, "colin": 3, "raffel": 3, "courtnei": 3, "ashcraft": 3, "cristina": 3, "garbacea": 3, "damien": [3, 5], "sileo": 3, "garrett": 3, "kilman": 3, "roth": 3, "daniel": [3, 5], "freeman": 3, "khashabi": 3, "levi": [3, 5], "mosegu\u00ed": 3, "gonz\u00e1lez": 3, "perszyk": 3, "danqi": 3, "daphn": 3, "ippolito": 3, "dar": 3, "gilboa": 3, "dohan": [3, 5], "drakard": 3, "jurgen": 3, "debajyoti": 3, "datta": 3, "deni": 3, "emelin": 3, "kleyko": 3, "deniz": 3, "yuret": 3, "derek": [3, 5], "tam": [3, 6], "dieuwk": 3, "hupk": 3, "diganta": 3, "dilyar": 3, "buzan": 3, "coelho": 3, "mollo": 3, "diyi": 3, "ho": 3, "dylan": 3, "schrader": 3, "ekaterina": 3, "shutova": 3, "ekin": 3, "dogu": 3, "cubuk": 3, "elad": 3, "segal": 3, "eleanor": 3, "hagerman": 3, "donowai": 3, "elli": 3, "pavlick": 3, "rodola": 3, "emma": 3, "lam": 3, "chu": [3, 5], "erkut": 3, "erni": 3, "dyer": 3, "jerzak": 3, "eunic": 3, "engefu": 3, "manyasi": 3, "evgenii": 3, "zheltonozhskii": 3, "fanyu": 3, "xia": 3, "fatemeh": 3, "siar": 3, "fernando": 3, "mart\u00ednez": 3, "plume": 3, "francesca": 3, "happ\u00e9": 3, "gaurav": 3, "genta": 3, "indra": 3, "winata": 3, "gerard": 3, "melo": 3, "germ\u00e1n": 3, "kruszewski": 3, "giambattista": [3, 5], "parascandolo": [3, 5], "giorgio": 3, "mariani": 3, "gloria": 3, "gonzalo": 3, "jaimovitch": 3, "l\u00f3pez": 3, "gregor": 3, "betz": 3, "gui": 3, "gur": 3, "hana": 3, "galijasev": 3, "rashkin": 3, "hannaneh": 3, "hajishirzi": 3, "harsh": 3, "hayden": 3, "bogar": 3, "henri": [3, 5], "shevlin": 3, "hinrich": 3, "sch\u00fctze": 3, "hiromu": 3, "yakura": 3, "hongm": 3, "hugh": 3, "mee": 3, "wong": [3, 5], "ng": [3, 5], "isaac": 3, "nobl": 3, "jaap": 3, "jumelet": 3, "geissing": 3, "jaehoon": 3, "jaim": 3, "fern\u00e1ndez": 3, "fisac": 3, "simon": 3, "koppel": 3, "koco\u0144": 3, "jana": 3, "thompson": [3, 5], "janel": 3, "wingfield": 3, "jarema": 3, "radom": 3, "jascha": 3, "sohl": [3, 5], "dickstein": 3, "phang": 3, "yosinski": 3, "jekaterina": 3, "novikova": 3, "jell": 3, "bosscher": 3, "jennif": 3, "marsh": 3, "jeroen": 3, "taal": 3, "jess": [3, 5], "engel": 3, "jesujoba": 3, "alabi": 3, "jiam": 3, "jillian": 3, "joan": 3, "waweru": 3, "burden": 3, "bali": 3, "jonathan": [3, 5], "batcheld": 3, "berant": 3, "j\u00f6rg": 3, "frohberg": 3, "jo": 3, "rozen": 3, "orallo": 3, "boudeman": 3, "guerr": 3, "tenenbaum": 3, "joyc": 3, "chua": 3, "kanclerz": 3, "karen": 3, "livescu": 3, "karl": 3, "krauth": 3, "karthik": 3, "gopalakrishnan": 3, "katerina": 3, "ignatyeva": 3, "katja": 3, "markert": 3, "kaustubh": 3, "dhole": 3, "gimpel": 3, "omondi": 3, "kori": 3, "mathewson": 3, "kristen": 3, "chiafullo": 3, "ksenia": 3, "shkaruta": 3, "shridhar": 3, "kyle": [3, 5], "mcdonel": 3, "richardson": 3, "laria": 3, "reynold": 3, "leo": [3, 5], "liam": [3, 5], "dugan": 3, "lianhui": 3, "qin": [3, 5], "lidia": 3, "contrera": 3, "ochando": 3, "morenc": 3, "moschella": 3, "luci": 3, "ludwig": 3, "schmidt": [3, 5], "luheng": 3, "olivero": 3, "col\u00f3n": 3, "metz": [3, 5], "l\u00fctfi": 3, "kerem": 3, "\u015fenel": 3, "maarten": [3, 5], "bosma": 3, "sap": [3, 5], "maartj": 3, "hoev": 3, "maheen": 3, "farooqi": 3, "manaal": 3, "faruqui": 3, "marco": 3, "baturan": 3, "marelli": 3, "maru": 3, "maria": 3, "quintana": 3, "tolkiehn": 3, "mario": [3, 5], "giulianelli": 3, "martha": 3, "potthast": 3, "leavitt": 3, "hagen": 3, "m\u00e1ty\u00e1": 3, "schubert": 3, "medina": [3, 5], "orduna": 3, "baitemirova": 3, "melodi": 3, "arnaud": 3, "melvin": 3, "mcelrath": 3, "yee": 3, "cohen": 3, "ivanitskii": 3, "starritt": 3, "strube": 3, "micha\u0142": 3, "sw\u0119drowski": 3, "michel": [3, 5], "bevilacqua": 3, "mihir": 3, "kale": 3, "cain": 3, "mime": 3, "mitch": 3, "walker": 3, "mo": 3, "tiwari": 3, "mohit": 3, "bansal": 3, "moin": 3, "aminnaseri": 3, "mor": 3, "geva": 3, "mozhdeh": 3, "gheini": 3, "mukund": 3, "varma": 3, "nanyun": 3, "peng": [3, 5], "nayeon": 3, "neta": 3, "krakov": 3, "doiron": 3, "nicol": 3, "martinez": 3, "nikita": 3, "nangia": 3, "nikla": 3, "decker": 3, "muennighoff": 3, "nitish": [3, 5], "shirish": [3, 5], "keskar": [3, 5], "niveditha": 3, "constant": 3, "fiedel": 3, "nuan": 3, "wen": 3, "oliv": [3, 5], "agha": 3, "elbaghdadi": 3, "omer": 3, "moreno": 3, "casar": 3, "parth": 3, "doshi": 3, "pascal": 3, "fung": 3, "pu": 3, "vicol": 3, "pegah": 3, "alipoormolabashi": 3, "peiyuan": 3, "eckerslei": 3, "phu": 3, "mon": 3, "htut": 3, "pinyu": 3, "hwang": 3, "piotr": 3, "mi\u0142kowski": 3, "piyush": 3, "patil": 3, "pouya": 3, "pezeshkpour": 3, "priti": 3, "oli": 3, "qiaozhu": 3, "qing": 3, "lyu": 3, "qinlang": 3, "rabin": 3, "banjad": 3, "rachel": [3, 5], "etta": 3, "rudolph": 3, "raefer": 3, "rahel": 3, "haback": 3, "ramon": 3, "risco": 3, "rapha\u00ebl": 3, "milli\u00e8r": 3, "rhythm": 3, "garg": 3, "rif": 3, "saurou": 3, "riku": 3, "arakawa": 3, "robb": 3, "raymaek": 3, "frank": [3, 5], "rohan": 3, "sikand": 3, "roman": [3, 5], "novak": 3, "sitelew": 3, "lebra": 3, "rosann": 3, "rowan": [3, 5], "ruslan": 3, "salakhutdinov": 3, "stoval": 3, "teehan": 3, "rylan": 3, "sahib": 3, "saif": 3, "sajant": 3, "anand": [3, 5], "dillav": 3, "shleifer": 3, "wiseman": 3, "gruetter": 3, "schoenholz": 3, "sanghyun": 3, "sanjeev": 3, "kwatra": 3, "sarik": 3, "ghazarian": 3, "sayan": 3, "casei": [3, 5], "bischoff": 3, "gehrmann": 3, "schuster": 3, "sepideh": 3, "sadeghi": 3, "shadi": 3, "hamdan": 3, "sharon": 3, "shashank": 3, "sherri": 3, "shi": 3, "shikhar": 3, "shima": 3, "asaadi": 3, "shubh": 3, "pachchigar": 3, "shubham": 3, "toshniw": 3, "shyam": [3, 5], "upadhyai": 3, "shyamolima": 3, "debnath": 3, "siamak": 3, "shakeri": 3, "thormey": 3, "melzi": 3, "siva": 3, "reddi": 3, "sneha": 3, "priscilla": 3, "makini": 3, "soo": 3, "hwan": 3, "spencer": 3, "toren": 3, "sriharsha": 3, "hatwar": 3, "stanisla": 3, "dehaen": 3, "stefan": 3, "divic": 3, "stella": 3, "biderman": 3, "stephen": 3, "prasad": 3, "piantadosi": 3, "stuart": [3, 5], "shieber": 3, "summer": [3, 5], "misherghi": 3, "svetlana": 3, "kiritchenko": 3, "swaroop": 3, "tal": 3, "linzen": 3, "tariq": 3, "tatsu": 3, "te": 3, "th\u00e9o": 3, "desbord": 3, "theodor": 3, "rothschild": 3, "phan": 3, "tiberiu": 3, "nkinyili": 3, "timo": 3, "schick": 3, "timofei": 3, "kornev": 3, "titu": 3, "tunduni": 3, "gerstenberg": 3, "trenton": 3, "trishala": 3, "neeraj": 3, "tushar": 3, "khot": 3, "shultz": 3, "uri": 3, "shaham": 3, "vera": 3, "demberg": 3, "victoria": [3, 5], "nyamai": 3, "vika": 3, "raunak": 3, "vinai": 3, "ramasesh": 3, "udai": 3, "prabhu": 3, "vishakh": 3, "padmakumar": 3, "vivek": 3, "srikumar": 3, "fedu": [3, 5], "wout": 3, "vossen": 3, "xiaoyu": 3, "tong": [3, 5], "xinran": 3, "xinyi": 3, "yadollah": 3, "yaghoobzadeh": 3, "yair": 3, "lakretz": 3, "yangqiu": 3, "yasaman": 3, "bahri": 3, "yichi": 3, "yide": 3, "yifu": 3, "yonatan": 3, "belinkov": 3, "yufang": 3, "seid": 3, "zhuoy": 3, "zijian": 3, "ziji": 3, "zirui": 3, "ziyi": 3, "extrapol": 3, "2206": 3, "04615": 3, "wpn": 3, "yada": 3, "pruksachatkun": 3, "amanpreet": 3, "julian": 3, "hill": 3, "stickier": 3, "wsm": 3, "1804": 3, "07461": 3, "wtb": 3, "tai": 3, "borgeaud": 3, "dani": 3, "yogatama": 3, "denni": [3, 5], "donald": 3, "metzler": 3, "ed": 3, "oriol": 3, "vinyal": 3, "dean": 3, "07682": 3, "wdr": 3, "doolei": 3, "manlei": 3, "arka": [3, 5], "pal": 3, "feuer": 3, "siddhartha": 3, "ravid": 3, "shwartz": [3, 5], "ziv": 3, "khalid": 3, "saifullah": 3, "siddartha": 3, "naidu": 3, "chinmai": 3, "hegd": 3, "lecun": 3, "goldstein": 3, "willi": 3, "neiswang": 3, "micah": 3, "goldblum": 3, "19314": 3, "yyh": 3, "baosong": 3, "chengpeng": 3, "chengyuan": 3, "fei": 3, "guant": 3, "haoran": 3, "huan": 3, "jialong": 3, "jialin": 3, "jianhong": 3, "tu": 3, "jianwei": 3, "jianxin": 3, "jin": [3, 5], "jingren": 3, "jinz": 3, "jinzheng": 3, "junyang": 3, "keme": 3, "keqin": 3, "kexin": 3, "mingfeng": 3, "xue": [3, 5], "ni": 3, "pei": 3, "ru": 3, "men": 3, "ruiz": 3, "runji": 3, "shiji": 3, "sinan": 3, "tianhang": 3, "wenbin": 3, "ge": 3, "xiaodong": 3, "deng": 3, "xiaohuan": 3, "xingzhang": 3, "xinyu": 3, "xipin": 3, "xuancheng": 3, "yichang": 3, "wan": 3, "yunfei": 3, "yuqiong": 3, "zhenru": 3, "zhihao": 3, "10671": 3, "zc": 3, "siyuan": 3, "zhuang": [3, 5], "zhanghao": 3, "yonghao": 3, "zi": 3, "zhuohan": 3, "xing": [3, 5], "2306": 3, "05685": 3, "huggingface24": 3, "06": [3, 6], "metaai24": 3, "possibli": 4, "eliot": 4, "thumb": 4, "\u00be": 4, "max_output_token": 4, "4096": 4, "16384": 4, "contrari": 4, "surpass": 4, "truncat": 4, "max_input_token": 4, "input_cost_per_token": 4, "output_cost_per_token": 4, "11b": 4, "v1": [4, 5], "128000": 4, "5e": 4, "20241022": 4, "8192": 4, "200000": 4, "3e": 4, "0613": 4, "6e": 4, "gemini": 4, "flash": 4, "1048576": 4, "2097152": 4, "05e": 4, "incomplet": [4, 5], "abruptli": 4, "shallow": 4, "thorough": 4, "dissatisfact": 4, "frustrat": 4, "feasibl": 4, "10k": 4, "diagram": 4, "charactertextsplitt": 4, "tiktoken": 4, "sequenti": 4, "newlin": 4, "broadli": [4, 6], "cheap": 4, "speciali": 4, "nltk": 4, "spaci": 4, "recurs": 4, "divid": 4, "hierarch": 4, "talk": 4, "theme": [4, 5], "splitter": 4, "get_chunk": 4, "chunk_siz": 4, "chunk_overlap": 4, "langchain_text_splitt": 4, "text_splitt": 4, "from_tiktoken_encod": 4, "split_text": 4, "persona": 4, "langchain_cor": [4, 6], "prompttempl": 4, "get_base_prompt_templ": 4, "base_prompt": [4, 6], "from_templ": 4, "llmchain": 4, "parser": [4, 6], "output_pars": 4, "stroutputpars": 4, "langchain_commun": 4, "chat_model": 4, "chatlitellm": 4, "get_llm_chain": 4, "prompt_templ": [4, 6], "llm_chain": [4, 6], "api_key_label": 4, "upper": 4, "_api_kei": 4, "get_dynamic_prompt_templ": 4, "get_dynamic_prompt_param": 4, "prompt_param": 4, "part_idx": 4, "total_part": 4, "chat_context": 4, "param": 4, "dynamic_prompt_param": 4, "introduct": 4, "concaten": 4, "generate_report": 4, "input_cont": 4, "llm_model_nam": 4, "report_part": 4, "num_part": 4, "dinam": 4, "priovid": 4, "invok": [4, 6], "cummul": 4, "max_chunk_s": 4, "max_chunk_overlap": 4, "readabl": 4, "apple_report": 4, "luation": 4, "disciplin": 4, "subhead": 4, "despit": [4, 6], "depth": 4, "overlook": 4, "easier": [4, 6], "preprocess": [4, 6], "necessit": 4, "meticul": 4, "bottleneck": 4, "mustafa": 4, "suleyman": 4, "infinit": 4, "fewer": [4, 5], "condens": 4, "versatil": 4, "drive": [4, 5, 6], "grace": 4, "fallback": 4, "empow": [4, 5], "langchain24": 4, "how_to": 4, "immens": 5, "commonplac": 5, "pervas": 5, "penetr": 5, "daili": 5, "hartvigsen": 5, "societi": 5, "alarm": 5, "openli": 5, "dolli": 5, "v2": 5, "llama2": [5, 6], "13b": 5, "emb": 5, "birth": 5, "siam": 5, "edgington": 5, "phenomenon": [5, 6], "jailbreak": 5, "promptcraft": 5, "stealth": 5, "sutton": 5, "subtl": 5, "trigger": 5, "subtleti": 5, "exception": 5, "phrase": 5, "evad": 5, "hqve": 5, "frer": 5, "hplidai": 5, "pl": 5, "hyperion": 5, "coast": 5, "redwood": 5, "tallest": 5, "tree": [5, 6], "routin": 5, "semin": 5, "bengio": 5, "yoshua": 5, "generalist": 5, "injustic": 5, "inequ": 5, "undermin": 5, "perpetu": 5, "displac": 5, "eros": 5, "realiti": 5, "fake": 5, "deepfak": 5, "distrust": 5, "cyberattack": 5, "spread": 5, "disinform": 5, "inadvert": 5, "signal": 5, "interven": 5, "irrevers": 5, "uncheck": 5, "catastroph": 5, "extinct": 5, "race": 5, "incentiv": 5, "shortcut": 5, "behind": 5, "stress": 5, "urgent": 5, "reorient": 5, "prejudic": 5, "gallego": 5, "leak": 5, "poison": 5, "intention": 5, "inject": 5, "mislead": 5, "exabeam": 5, "finra": 5, "3110": 5, "mandat": 5, "supervisori": 5, "trustworthi": 5, "medicin": 5, "unicef": 5, "uk": 5, "contest": 5, "congress": 5, "enact": 5, "pictur": [5, 6], "territori": 5, "oversea": 5, "chines": 5, "legitim": 5, "consent": 5, "complaint": 5, "cooper": 5, "extraterritori": 5, "offshor": 5, "draft": 5, "voluntari": 5, "neutral": 5, "player": 5, "prepared": 5, "ahead": 5, "compris": 5, "cbrn": 5, "persuas": 5, "autonomi": 5, "gradat": 5, "scorecard": 5, "elig": 5, "medium": [5, 6], "advisori": 5, "sag": 5, "shut": 5, "exfiltr": 5, "harden": 5, "asl": 5, "biosafeti": 5, "elev": 5, "warn": 5, "bioweapon": 5, "compartment": 5, "difficulti": 5, "4x": 5, "jump": 5, "paus": 5, "frontier": 5, "deepmind": 5, "biosecur": 5, "buffer": 5, "formul": [5, 6], "calibr": 5, "promin": 5, "taxonomi": 5, "llamaguard": 5, "alaga": 5, "substandard": 5, "oxford": 5, "wachter": 5, "argument": [5, 6], "blur": 5, "ill": 5, "argu": [5, 6], "stifl": 5, "suscept": 5, "aadc": 5, "outset": 5, "curricula": 5, "adversari": 5, "uncov": [5, 6], "appar": 5, "thoroughli": 5, "lm": [5, 6], "problemat": 5, "arrai": 5, "undergo": 5, "280b": 5, "cai": [5, 6], "utilis": 5, "minimis": 5, "enshrin": 5, "evas": 5, "resort": 5, "encod": 5, "simultan": 5, "avenu": 5, "cambria": 5, "inherit": 5, "influenti": 5, "debias": 5, "occurr": 5, "phish": 5, "dpo": 5, "saladbench": 5, "hh": 5, "abc": 5, "webpurifi": 5, "aw": 5, "comprehend": 5, "ibm": 5, "granit": 5, "guardian": 5, "nemo": 5, "mistralai": 5, "blob": [5, 6], "ipynb": 5, "ai24": 5, "asa24": 5, "jide": 5, "jona": 5, "schuett": 5, "marku": 5, "anderljung": 5, "08751": 5, "bhy": 5, "geoffrei": 5, "hinton": 5, "pieter": 5, "abbeel": 5, "trevor": 5, "darrel": 5, "yuval": 5, "harari": 5, "ya": 5, "lan": 5, "shai": 5, "shalev": 5, "gillian": 5, "hadfield": 5, "clune": 5, "tegan": 5, "maharaj": 5, "hutter": 5, "at\u0131l\u0131m": 5, "g\u00fcne\u015f": 5, "baydin": 5, "sheila": 5, "mcilraith": 5, "qiqi": 5, "ashwin": 5, "acharya": 5, "anca": 5, "dragan": 5, "philip": 5, "torr": 5, "russel": 5, "kahneman": 5, "brauner": 5, "s\u00f6ren": 5, "mindermann": 5, "amid": 5, "384": 5, "6698": 5, "1126": 5, "adn0117": 5, "pdf": 5, "bbc": 5, "emili": 5, "braca": 5, "israel": 5, "carter": 5, "hafsa": 5, "kanchwala": 5, "khojasteh": 5, "charli": 5, "landow": 5, "luo": 5, "magarelli": 5, "mirin": 5, "averi": 5, "moyer": 5, "kayla": 5, "simpson": 5, "amelia": 5, "skawinski": 5, "heverin": 5, "23308": 5, "bmc": 5, "dillon": 5, "brendan": 5, "murphi": 5, "Will": 5, "khachaturov": 5, "gleav": 5, "kellin": 5, "pelrin": 5, "2408": [5, 6], "02946": 5, "cmm": 5, "erik": 5, "lorenzo": 5, "malandri": 5, "fabio": 5, "mercorio": 5, "navid": 5, "nobani": 5, "seveso": 5, "15248": 5, "edg24": 5, "exa24": 5, "cyber": 5, "grb": 5, "rossi": 5, "joe": 5, "barrow": 5, "mehrab": 5, "tanjim": 5, "sungchul": 5, "franck": 5, "dernoncourt": 5, "ruiyi": 5, "nesreen": 5, "2309": 5, "00770": 5, "hgp": 5, "saadia": 5, "hamid": 5, "palangi": 5, "dipankar": 5, "ec": 5, "kamar": 5, "oxi": 5, "smaranda": 5, "muresan": 5, "preslav": 5, "nakov": 5, "alin": 5, "villavicencio": 5, "editor": 5, "60th": 5, "linguist": 5, "3309": 5, "3326": 5, "dublin": 5, "aclanthologi": 5, "acl": 5, "18653": 5, "hym": 5, "weijiang": 5, "weitao": 5, "weihong": 5, "zhangyin": 5, "haotian": 5, "qianglong": 5, "weihua": 5, "xiaocheng": 5, "bing": 5, "ting": 5, "dx": 5, "1145": [5, 6], "3703155": 5, "oaa": 5, "adler": 5, "ahmad": 5, "ilg": 5, "akkaya": 5, "florencia": 5, "leoni": 5, "aleman": 5, "janko": 5, "altenschmidt": 5, "altman": 5, "shyamal": 5, "anadkat": 5, "avila": 5, "valeri": 5, "balcom": 5, "baltescu": 5, "haim": 5, "belgum": 5, "irwan": 5, "bello": 5, "jake": 5, "berdin": 5, "bernadett": 5, "shapiro": 5, "berner": 5, "lenni": 5, "bogdonoff": 5, "boiko": 5, "madelain": 5, "boyd": 5, "luisa": 5, "brakman": 5, "button": 5, "rosi": 5, "campbel": 5, "cann": 5, "brittani": 5, "carei": 5, "carlson": 5, "rori": 5, "carmichael": 5, "che": 5, "foti": 5, "sulli": 5, "rubi": 5, "chess": 5, "chester": 5, "cho": 5, "hyung": 5, "won": 5, "chung": 5, "jeremiah": 5, "currier": 5, "yunx": 5, "cori": 5, "decareaux": 5, "degri": 5, "deutsch": 5, "devil": 5, "dhar": 5, "steve": 5, "dowl": 5, "dun": 5, "adrien": 5, "ecoffet": 5, "atti": 5, "eleti": 5, "tyna": 5, "elound": 5, "farhi": 5, "niko": 5, "sim\u00f3n": 5, "posada": 5, "fishman": 5, "juston": 5, "isabella": 5, "fulford": 5, "georg": 5, "gibson": 5, "vik": 5, "tarun": 5, "gogineni": 5, "goh": 5, "rapha": 5, "gontijo": 5, "lope": 5, "gordon": 5, "morgan": 5, "grafstein": 5, "yufei": 5, "guo": 5, "hallaci": 5, "heaton": 5, "johann": 5, "heideck": 5, "hickei": 5, "wade": 5, "hoeschel": 5, "brandon": [5, 6], "houghton": 5, "kenni": 5, "hsu": 5, "shengli": 5, "xin": 5, "joost": 5, "huizinga": 5, "shawn": 5, "joann": 5, "jang": 5, "roger": 5, "haozhun": 5, "shino": 5, "jomoto": 5, "billi": 5, "jonn": 5, "tomer": 5, "kaftan": 5, "\u0142ukasz": 5, "kamali": 5, "ingmar": 5, "kanitscheid": 5, "tabarak": 5, "khan": 5, "logan": 5, "kilpatrick": 5, "jong": 5, "wook": 5, "christina": 5, "yongjik": 5, "hendrik": 5, "kirchner": 5, "kiro": 5, "matt": 5, "kokotajlo": 5, "kondraciuk": 5, "kondrich": 5, "konstantinidi": 5, "kosic": 5, "vishal": 5, "kuo": 5, "lamp": 5, "ikai": 5, "teddi": 5, "jade": 5, "leung": 5, "chak": 5, "ming": 5, "lim": 5, "molli": 5, "mateusz": 5, "litwin": 5, "theresa": 5, "lopez": 5, "patricia": 5, "lue": 5, "makanju": 5, "malfacini": 5, "markov": 5, "yaniv": 5, "markovski": 5, "bianca": 5, "mayn": 5, "mckinnei": 5, "christin": 5, "mcleavei": 5, "mcmillan": 5, "mcneil": 5, "aalok": 5, "menick": 5, "andrei": 5, "mishchenko": 5, "vinni": 5, "monaco": 5, "mu": 5, "murk": 5, "m\u00e9ly": 5, "ashvin": 5, "nair": 5, "reiichiro": 5, "nakano": 5, "rajeev": 5, "nayak": 5, "arvind": 5, "neelakantan": 5, "ngo": 5, "hyeonwoo": 5, "noh": 5, "cullen": 5, "keef": 5, "jakub": 5, "pachocki": 5, "palermo": 5, "ashlei": 5, "pantuliano": 5, "joel": 5, "parish": 5, "emi": 5, "parparita": 5, "passo": 5, "perelman": 5, "belbut": 5, "pere": 5, "pokorni": 5, "pokrass": 5, "vitchyr": 5, "pong": 5, "tolli": 5, "powel": 5, "bori": 5, "proehl": 5, "rae": 5, "ramesh": 5, "raymond": 5, "franci": 5, "kendra": 5, "rimbach": 5, "carl": 5, "rotst": 5, "roussez": 5, "saltarelli": 5, "ted": 5, "sander": 5, "schnurr": 5, "selsam": 5, "kyla": 5, "sheppard": 5, "toki": 5, "sherbakov": 5, "jessica": 5, "shieh": 5, "shoker": 5, "pranav": 5, "szymon": 5, "sidor": 5, "sigler": 5, "sitkin": 5, "sokolowski": 5, "natali": 5, "staudach": 5, "madelein": 5, "tootoonchian": 5, "tseng": 5, "preston": 5, "tuggl": 5, "turlei": 5, "juan": 5, "cer\u00f3n": 5, "urib": 5, "vallon": 5, "vijayvergiya": 5, "justin": 5, "jai": 5, "alvin": 5, "ward": 5, "cj": 5, "weinmann": 5, "akila": 5, "welihinda": 5, "jiayi": 5, "weng": 5, "lilian": 5, "wiethoff": 5, "willner": 5, "wolrich": 5, "lauren": 5, "workman": 5, "sherwin": 5, "yoo": 5, "zeller": 5, "shengjia": 5, "juntang": 5, "zhuk": 5, "2303": 5, "08774": 5, "saffron": 5, "ring": 5, "aslanid": 5, "glaes": 5, "nat": 5, "mcalees": 5, "irv": 5, "2202": 5, "03286": 5, "szw": 5, "qinghua": 5, "desmond": 5, "higham": 5, "gorban": 5, "bastouni": 5, "ivan": 5, "tyukin": 5, "12670": 5, "vsk": 5, "kannappan": 5, "simplesafetytest": 5, "2311": 5, "08370": 5, "wmr24": 5, "sandra": 5, "brent": 5, "mittelstadt": 5, "duti": 5, "royal": 5, "240197": 5, "royalsocietypublish": 5, "1098": 5, "rso": 5, "zyi": 5, "shune": 5, "lyumanshan": 5, "jingyu": 5, "shui": 5, "haobin": 5, "pengfei": 5, "hewu": 5, "ghost": 5, "14931": 5, "zho24": 5, "anthropic24": 5, "cdn": 5, "1adf000c8f675958c2ee23805d91aaade1cd4613": 5, "deepmind24": 5, "googleapi": 5, "fsf": 5, "europeanmagency24": 5, "ema": 5, "europa": 5, "activities_en": 5, "financialirauthority24": 5, "libraryocongress23": 5, "loc": 5, "gov": 5, "nationaliosatechnology24": 5, "nist": 5, "itl": 5, "openai24": 5, "ukgovernment24": 5, "unicef24": 5, "innocenti": 5, "julia": 6, "easili": 6, "response_cont": 6, "wow": 6, "lot": 6, "breakdown": 6, "impress": 6, "huge": 6, "serious": 6, "is_json": 6, "myjson": 6, "trial": 6, "wrangl": 6, "hoc": 6, "streamlin": 6, "dataset": 6, "unwant": 6, "overflow": 6, "overwhelm": 6, "twitter": 6, "youtub": 6, "blueprint": 6, "nativ": 6, "json_format": 6, "person1": 6, "q1": 6, "person2": 6, "nest": 6, "todai": 6, "thellm": 6, "unend": 6, "whitespac": 6, "forget": 6, "throw": 6, "somewher": 6, "json_object": 6, "circul": 6, "vertex": 6, "worri": 6, "enum": 6, "simpler": 6, "secextract": 6, "mentioned_ent": 6, "mentioned_plac": 6, "extract_from_sec_fil": 6, "sec_filing_text": 6, "hint": 6, "prompt_extract": 6, "sec_extract": 6, "washington": 6, "usabl": 6, "beg": 6, "with_structured_output": 6, "runnabl": 6, "typeddict": 6, "qu": 6, "langchain_openai": 6, "chatopenai": 6, "chatprompttempl": 6, "extract_from_sec_filing_langchain": 6, "structured_llm": 6, "from_messag": 6, "sec_extraction_langchain": 6, "hood": 6, "logit": 6, "willard": 6, "louf": 6, "reformul": 6, "finit": 6, "fsm": 6, "s_": 6, "s_t": 6, "s_1": 6, "mask": 6, "tild": 6, "odot": 6, "rightarrow": 6, "boolean": 6, "wise": 6, "regex": 6, "thien": 6, "automaton": 6, "dfa": 6, "decod": 6, "outgo": 6, "renorm": 6, "yy": 6, "nn": 6, "ever": 6, "aa": 6, "lwai": 6, "prop": 6, "yynnaa": 6, "malform": 6, "sec_extraction_outlin": 6, "zsp": 6, "zicorp": 6, "cpp": 6, "gbnf": 6, "ggml": 6, "bnf": 6, "ggerganov": 6, "accomplish": 6, "backu": 6, "naur": 6, "wikipedia": 6, "contributor": 6, "curl": 6, "fssl": 6, "sh": 6, "extract_entities_from_sec_fil": 6, "ollama_structured_output_prompt_suffix": 6, "ollama_structured_output_temperatur": 6, "uncensor": 6, "model_json_schema": 6, "response_json": 6, "wrapper": 6, "exllama2": 6, "mlx": 6, "know": 6, "chanc": 6, "correctli": 6, "furthermor": 6, "nonetheless": 6, "wrap": 6, "gemma": 6, "wors": 6, "extran": 6, "dispar": 6, "preval": 6, "rapidli": 6, "speak": 6, "aider": 6, "outweigh": 6, "rebutt": 6, "reproduct": 6, "paint": 6, "verif": 6, "dottxt": 6, "flaw": 6, "uneven": 6, "didn": 6, "conflat": 6, "drawback": 6, "unlock": 6, "wider": 6, "thank": 6, "pfiffer": 6, "aid24": 6, "dot24": 6, "demo": 6, "gge24": 6, "readm": 6, "llf": 6, "xieyang": 6, "frederick": 6, "fiannaca": 6, "terri": 6, "koo": 6, "dixon": 6, "ea": 6, "ny": 6, "usa": 6, "machineri": 6, "3613905": 6, "3650756": 6, "ln": 6, "xuan": 6, "hai": 6, "nguyen": 6, "ngoc": 6, "tiviati": 6, "hieu": 6, "dao": 6, "shafiq": 6, "joti": 6, "kenji": 6, "kawaguchi": 6, "nanci": 6, "min": 6, "kan": 6, "08656": 6, "out24": 6, "twt": 6, "zhi": 6, "cheng": 6, "kuang": 6, "tsai": 6, "chieh": 6, "hung": 6, "yun": 6, "nung": 6, "02442": 6, "tt24": 6, "vivien": 6, "vivien000": 6, "wl23": 6, "r\u00e9mi": 6, "09702": 6, "wikipediacontributors24": 6, "wiktionari": 6, "naur_form": 6}, "objects": {}, "objtypes": {}, "objnames": {}, "titleterms": {"introduct": [0, 1, 2, 3, 5, 6], "content": [0, 2, 3, 4, 5, 6], "core": 0, "challeng": 0, "we": 0, "ll": 0, "address": 0, "A": [0, 1, 2], "practic": [0, 1, 6], "approach": [0, 5], "an": 0, "open": [0, 1], "sourc": [0, 1], "book": 0, "note": [0, 2], "perspect": 0, "who": 0, "thi": 0, "i": 0, "For": 0, "outcom": 0, "prerequisit": 0, "set": 0, "up": 0, "your": 0, "environ": 0, "python": 0, "setup": [0, 2], "api": [0, 6], "kei": [0, 3, 4], "configur": 0, "code": 0, "repositori": 0, "troubleshoot": 0, "common": 0, "issu": 0, "about": 0, "author": 0, "": 0, "tame": 1, "llm": [1, 3, 5], "guid": 1, "pitfal": 1, "softwar": [1, 3], "chapter": 1, "1": [1, 4], "2": [1, 4], "wrestl": [1, 6], "structur": [1, 6], "output": [1, 4, 6], "3": [1, 4], "input": 1, "size": [1, 4], "length": [1, 4], "limit": [1, 4], "4": [1, 4], "5": 1, "The": [1, 3], "eval": [1, 3], "gap": [1, 3], "6": 1, "hallucin": 1, "realiti": 1, "7": 1, "prefer": [1, 2], "base": [1, 2, 3, 4, 5], "align": [1, 2], "8": 1, "cost": [1, 4], "factor": [1, 5], "9": 1, "break": 1, "free": 1, "from": [1, 2, 5], "cloud": 1, "provid": [1, 6], "appendix": 1, "tool": [1, 3, 5, 6], "resourc": 1, "citat": [1, 2], "raw": 2, "capabl": 2, "On": 2, "misalign": 2, "languag": 2, "model": [2, 3, 4], "human": [2, 5], "supervis": 2, "fine": 2, "tune": 2, "sft": 2, "augment": 2, "case": [2, 5], "studi": [2, 5], "polici": 2, "experiment": 2, "deliver": 2, "smollm2": 2, "dataset": [2, 3, 5], "synthet": 2, "gener": [2, 3, 4, 5], "user": [2, 6], "prompt": [2, 4, 6], "reject": 2, "respons": 2, "chosen": 2, "dpo": 2, "optim": 2, "data": [2, 5], "prepar": 2, "vibe": 2, "check": 2, "evalu": [2, 3], "discuss": [2, 4, 6], "refer": [2, 3, 4, 5, 6], "non": 3, "determinist": 3, "machin": 3, "emerg": 3, "properti": 3, "problem": [3, 4, 6], "statement": [3, 4, 6], "tradit": 3, "v": 3, "design": 3, "applic": 3, "test": 3, "requir": 3, "matrix": 3, "conceptu": 3, "overview": 3, "consider": [3, 4], "metric": 3, "task": 3, "benchmark": [3, 5], "leaderboard": 3, "lightev": 3, "mmlu": 3, "econometr": 3, "sampl": 3, "famili": 3, "us": 3, "langsmith": 3, "promptfoo": 3, "comparison": [3, 4, 6], "conclus": [3, 4, 6], "what": 4, "ar": 4, "token": 4, "across": 4, "chunk": 4, "contextu": 4, "link": 4, "long": 4, "form": 4, "step": 4, "write": 4, "templat": 4, "construct": 4, "dynam": 4, "paramet": 4, "report": 4, "exampl": 4, "usag": 4, "implic": 4, "futur": 4, "safeti": 5, "risk": 5, "ai": 5, "amplifi": 5, "exist": 5, "harm": 5, "novel": 5, "associ": 5, "autonom": 5, "exacerb": 5, "specif": [5, 6], "integr": 5, "bia": 5, "privaci": 5, "secur": 5, "guidanc": 5, "govern": 5, "organ": 5, "privat": 5, "sector": 5, "openai": 5, "anthrop": 5, "googl": 5, "rubric": 5, "mlcommon": 5, "centr": 5, "porquoi": 5, "red": 5, "team": 5, "constitut": 5, "explain": 5, "xai": 5, "reinforc": 5, "learn": 5, "feedback": 5, "rlhf": 5, "technic": 5, "implement": 5, "compon": 5, "filter": 5, "make": 5, "mistral": 5, "7b": 5, "harmless": 5, "need": 6, "solut": 6, "strategi": 6, "techniqu": 6, "One": 6, "shot": 6, "json": 6, "mode": 6, "langchain": 6, "outlin": 6, "ollama": 6, "compar": 6, "framework": 6, "best": 6, "research": 6, "ongo": 6, "debat": 6, "acknowledg": 6}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.intersphinx": 1, "sphinxcontrib.bibtex": 9, "sphinx": 57}, "alltitles": {"Introduction": [[0, "introduction"], [2, "introduction"], [2, "id22"], [3, "introduction"], [5, "introduction"], [6, "introduction"]], "Contents": [[0, "contents"], [2, "contents"], [3, "contents"], [4, "contents"], [5, "contents"], [6, "contents"]], "Core Challenges We\u2019ll Address": [[0, "core-challenges-we-ll-address"]], "A Practical Approach": [[0, "a-practical-approach"]], "An Open Source Approach": [[0, "an-open-source-approach"]], "Open Source Book": [[0, "open-source-book"]], "A Note on Perspective": [[0, "a-note-on-perspective"]], "Who This Book Is For": [[0, "who-this-book-is-for"]], "Outcomes": [[0, "outcomes"]], "Prerequisites": [[0, "prerequisites"]], "Setting Up Your Environment": [[0, "setting-up-your-environment"]], "Python Environment Setup": [[0, "python-environment-setup"]], "API Keys Configuration": [[0, "api-keys-configuration"]], "Code Repository": [[0, "code-repository"]], "Troubleshooting Common Issues": [[0, "troubleshooting-common-issues"]], "About the Author(s)": [[0, "about-the-author-s"]], "Taming LLMs": [[1, "taming-llms"]], "A Practical Guide to LLM Pitfalls with Open Source Software": [[1, "a-practical-guide-to-llm-pitfalls-with-open-source-software"]], "Chapter 1: Introduction": [[1, "chapter-1-introduction"]], "Chapter 2: Wrestling with Structured Output": [[1, "chapter-2-wrestling-with-structured-output"]], "Chapter 3: Input Size and Length Limitations": [[1, "chapter-3-input-size-and-length-limitations"]], "Chapter 4: Output Size and Length Limitations": [[1, "chapter-4-output-size-and-length-limitations"]], "Chapter 5: The Evals Gap": [[1, "chapter-5-the-evals-gap"]], "Chapter 6: Hallucination: The Reality Gap": [[1, "chapter-6-hallucination-the-reality-gap"]], "Chapter 7: Preference-based Alignment": [[1, "chapter-7-preference-based-alignment"]], "Chapter 8: The Cost Factor": [[1, "chapter-8-the-cost-factor"]], "Chapter 9: Breaking Free from Cloud Providers": [[1, "chapter-9-breaking-free-from-cloud-providers"]], "Appendix A: Tools and Resources": [[1, "appendix-a-tools-and-resources"]], "Citation": [[1, "citation"], [2, "citation"]], "Preference-Based Alignment": [[2, "preference-based-alignment"]], "From Raw Capabilities to Preference Alignment": [[2, "from-raw-capabilities-to-preference-alignment"]], "On the Misalignment of Language Models": [[2, "on-the-misalignment-of-language-models"]], "Aligning Language Models with Human Preferences": [[2, "aligning-language-models-with-human-preferences"]], "Supervised Fine-Tuning (SFT) for Model Alignment": [[2, "supervised-fine-tuning-sft-for-model-alignment"]], "Augmenting SFT with Human Preferences": [[2, "augmenting-sft-with-human-preferences"]], "Case Study: Aligning a Language Model to a Policy": [[2, "case-study-aligning-a-language-model-to-a-policy"]], "Experimental Setup": [[2, "experimental-setup"]], "Deliverables": [[2, "deliverables"]], "A Note on smolLM2 Models": [[2, "a-note-on-smollm2-models"]], "Policy": [[2, "policy"]], "Preference Dataset - Synthetic Dataset Generation": [[2, "preference-dataset-synthetic-dataset-generation"]], "User Prompts": [[2, "user-prompts"]], "Rejected Responses": [[2, "rejected-responses"]], "Chosen Responses": [[2, "chosen-responses"]], "Generate DPO Dataset": [[2, "generate-dpo-dataset"]], "DPO-Based Optimization": [[2, "dpo-based-optimization"]], "Data Preparation": [[2, "data-preparation"]], "Fine-Tuning": [[2, "fine-tuning"]], "Vibe Check": [[2, "vibe-check"]], "Alignment Evaluation": [[2, "alignment-evaluation"]], "Discussion": [[2, "discussion"], [4, "discussion"], [6, "discussion"]], "References": [[2, "references"], [3, "references"], [4, "references"], [5, "references"], [6, "references"]], "The Evals Gap": [[3, "the-evals-gap"]], "Non-Deterministic Generative Machines": [[3, "non-deterministic-generative-machines"]], "Emerging Properties": [[3, "emerging-properties"]], "Problem Statement": [[3, "problem-statement"], [4, "problem-statement"], [6, "problem-statement"]], "Evals of Traditional Software vs LLMs": [[3, "evals-table"]], "Evals Design": [[3, "evals-design"]], "LLM Application Testing Requirements Matrix": [[3, "validation-requirements"]], "Conceptual Overview": [[3, "conceptual-overview"]], "Design Considerations": [[3, "design-considerations"]], "Metrics": [[3, "metrics"]], "Key Metrics for Evaluating Generative Tasks": [[3, "key-metrics"]], "Evaluators": [[3, "evaluators"]], "Model-Based Evaluation": [[3, "model-based-evaluation"]], "Evaluating Evaluators": [[3, "evaluating-evaluators"]], "Benchmarks and Leaderboards": [[3, "benchmarks-and-leaderboards"]], "Tools": [[3, "tools"], [5, "tools"]], "LightEval": [[3, "lighteval"]], "MMLU Econometrics Task Dataset sample": [[3, "mmlu-econometrics"]], "Model Families Evaluated Using LightEval": [[3, "model-families"]], "LangSmith": [[3, "langsmith"]], "PromptFoo": [[3, "promptfoo"]], "Comparison": [[3, "comparison"]], "Comparison of Lighteval, LangSmith, and Promptfoo": [[3, "tool-comparison"]], "Conclusion": [[3, "conclusion"], [4, "conclusion"], [6, "conclusion"]], "Output Size Limitations": [[4, "output-size-limitations"]], "What are Token Limits?": [[4, "what-are-token-limits"]], "Token Cost and Length Limitation Comparison Across Key Models": [[4, "token-cost-table"]], "Content Chunking with Contextual Linking": [[4, "content-chunking-with-contextual-linking"]], "Generating long-form content": [[4, "generating-long-form-content"]], "Step 1: Chunking the Content": [[4, "step-1-chunking-the-content"]], "Step 2: Writing the Base Prompt Template": [[4, "step-2-writing-the-base-prompt-template"]], "Step 3: Constructing Dynamic Prompt Parameters": [[4, "step-3-constructing-dynamic-prompt-parameters"]], "Step 4: Generating the Report": [[4, "step-4-generating-the-report"]], "Example Usage": [[4, "example-usage"]], "Implications": [[4, "implications"]], "Future Considerations": [[4, "future-considerations"]], "Safety": [[5, "safety"]], "Safety Risks": [[5, "safety-risks"]], "General AI Safety Risks": [[5, "general-ai-safety-risks"]], "Amplified Existing Harms and Novel Risks": [[5, "amplified-existing-harms-and-novel-risks"]], "Risks Associated with Autonomous AI": [[5, "risks-associated-with-autonomous-ai"]], "Exacerbating Factors": [[5, "exacerbating-factors"]], "LLMs Specific Safety Risks": [[5, "llms-specific-safety-risks"]], "Data Integrity and Bias": [[5, "data-integrity-and-bias"]], "Privacy and Security": [[5, "privacy-and-security"]], "Guidance": [[5, "guidance"]], "Governments & Organizations": [[5, "governments-organizations"]], "Private Sector": [[5, "private-sector"]], "OpenAI": [[5, "openai"]], "Anthropic": [[5, "anthropic"]], "Google": [[5, "google"]], "Rubrics": [[5, "rubrics"]], "MLCommons AI Safety Benchmark": [[5, "mlcommons-ai-safety-benchmark"]], "Centre for the Governance of AI Rubric": [[5, "centre-for-the-governance-of-ai-rubric"]], "Porquoi": [[5, "porquoi"]], "Approaches": [[5, "approaches"]], "Red Teaming": [[5, "red-teaming"]], "Constitutional AI": [[5, "constitutional-ai"]], "Explainable AI (XAI)": [[5, "explainable-ai-xai"]], "Reinforcement Learning from Human Feedback (RLHF)": [[5, "reinforcement-learning-from-human-feedback-rlhf"]], "Technical Implementation Components": [[5, "technical-implementation-components"]], "Datasets": [[5, "datasets"]], "Filter-based": [[5, "filter-based"]], "LLM-based": [[5, "llm-based"]], "Benchmarks": [[5, "benchmarks"]], "Case Study: Making Mistral 7B Harmless": [[5, "case-study-making-mistral-7b-harmless"]], "Wrestling with Structured Output": [[6, "wrestling-with-structured-output"]], "User Needs": [[6, "user-needs"]], "Solutions": [[6, "solutions"]], "Strategies": [[6, "strategies"]], "Techniques and Tools": [[6, "techniques-and-tools"]], "One-Shot Prompts": [[6, "one-shot-prompts"]], "Structured Output with Provider-Specific APIs": [[6, "structured-output-with-provider-specific-apis"]], "JSON Mode": [[6, "json-mode"]], "LangChain": [[6, "langchain"]], "Outlines": [[6, "outlines"]], "Ollama": [[6, "ollama"]], "Comparing Solutions": [[6, "comparing-solutions"]], "Structured Output Frameworks Comparison": [[6, "structured-output-frameworks"]], "Best Practices": [[6, "best-practices"]], "Research and Ongoing Debate": [[6, "research-and-ongoing-debate"]], "Acknowledgements": [[6, "acknowledgements"]]}, "indexentries": {}}) \ No newline at end of file diff --git a/tamingllms/_build/jupyter_execute/markdown/intro.ipynb b/tamingllms/_build/jupyter_execute/markdown/intro.ipynb index 5587502..cce50a5 100644 --- a/tamingllms/_build/jupyter_execute/markdown/intro.ipynb +++ b/tamingllms/_build/jupyter_execute/markdown/intro.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "42bee8f4", + "id": "d486d55f", "metadata": {}, "source": [ "(intro)=\n", diff --git a/tamingllms/_build/jupyter_execute/notebooks/safety.ipynb b/tamingllms/_build/jupyter_execute/notebooks/safety.ipynb index 6b54ca1..ee478b1 100644 --- a/tamingllms/_build/jupyter_execute/notebooks/safety.ipynb +++ b/tamingllms/_build/jupyter_execute/notebooks/safety.ipynb @@ -16,23 +16,23 @@ "\n", "## Introduction\n", "\n", - "Alongside their immense potential, LLMs also present significant safety risks and ethical challenges that demand careful consideration. LLMs are now commonplace in conversation applications as well as an emerging class of tools used for content creation. Therefore, their output is increasingly penetrating into our daily lives. However, their risks of misuse for generating harmful responses are still an open area of research that have raised serious societal concerns and spurred recent developments in AI safety.\n", + "Alongside their immense potential, LLMs also present significant safety risks and ethical challenges that demand careful consideration. LLMs are now commonplace in conversation applications as well as serving as core engine powering an emerging class of tools used for content creation. Therefore, their output is increasingly pervasive and penetrating more and more into our daily lives. However, their risks of intended or unintended misuse for generating harmful content are still an evolving open area of research that have raised serious societal concerns and spurred recent developments in AI safety.\n", "\n", "Without proper safeguards, LLMs can generate harmful content and respond to malicious prompts in dangerous ways {cite}`openai2024gpt4technicalreport, hartvigsen-etal-2022-toxigen`. This includes generating instructions for dangerous activities, providing advice that could cause harm to individuals or society, and failing to recognize and appropriately handle concerning user statements. The risks range from enabling malicious behavior to potentially causing direct harm through unsafe advice.\n", "\n", - "{numref}`llm-dangers` from {cite:p}`vidgen2024simplesafetyteststestsuiteidentifying` shows a simple yet alarming example of harmful responses from an input prompt provided by some open source LLMs. Those are models that are openly available and can be used by anyone. Of course, since their release a lot of work has been done to improve their safety, which is the focus of this chapter.\n", + "{numref}`llm-dangers` from {cite}`vidgen2024simplesafetyteststestsuiteidentifying` shows a simple yet alarming example of harmful responses from an input prompt provided by some open source LLMs. Those are models that are openly available and can be used by anyone.\n", "\n", "```{figure} ../_static/safety/danger.png\n", "---\n", "name: llm-dangers\n", "alt: Common dangers and risks of LLMs\n", - "width: 100%\n", + "width: 75%\n", "align: center\n", "---\n", - "Responses from Mistral (7B), Dolly v2 (12B), and Llama2 (13B) to a harmful user prompt.\n", + "Responses from Mistral (7B), Dolly v2 (12B), and Llama2 (13B) to a harmful user prompt {cite}`vidgen2024simplesafetyteststestsuiteidentifying`.\n", "```\n", "\n", - "In this chapter, we will explore the various safety measures that have been developed to mitigate these risks. We will also discuss the challenges and future directions in AI safety.\n" + "In this chapter, we will explore the various safety measures that have been developed to mitigate these risks. This includes guidance from governments, organizations, and the private sector on responsible AI development and deployment. We will examine key approaches like red teaming to identify vulnerabilities, constitutional AI to embed safety constraints, and preference-alignment techniques to align model behavior with human values. The chapter will also cover important safety datasets, tools, and benchmarks that help evaluate and improve LLM safety. Finally, we go over a case study where we attempt to make an open source LLM harmless.\n" ] }, { @@ -42,9 +42,9 @@ "## Safety Risks\n", "\n", "\n", - "The vulnerabilities of large language models (LLMs) present both opportunities and risks, as explored in an recent SIAM News article 'How to Exploit Large Language Models — For Good or Bad' {cite}`siam2024exploitllms`. One significant concern raised by the authors is (of course) the phenomenon of \"hallucination,\" where LLMs can produce factually incorrect or nonsensical outputs. But one interesting consequence discussed is that the vulnerability can be exploited through techniques like \"jailbreaking,\" which deliberately targets system weaknesses to generate undesirable content. Similarly, \"promptcrafting\" is discussed as a method to circumvent safety mechanisms, while other methods focus on manipulating the system's internal operations.\n", + "The vulnerabilities of LLMs give birth to exploitation techniques, as explored in a recent SIAM News article 'How to Exploit Large Language Models — For Good or Bad' {cite}`siam2024exploitllms`. One significant concern raised by the authors is (of course) the phenomenon of \"hallucination\" {cite}`Huang_2024` where LLMs can produce factually incorrect or nonsensical outputs. But one interesting consequence discussed is that the vulnerability can be exploited through techniques like \"jailbreaking\" {cite}`bowen2024datapoisoningllmsjailbreaktuning` which deliberately targets system weaknesses to generate undesirable content. Similarly, \"promptcrafting\" {cite}`benjamin2024systematicallyanalyzingpromptinjection` is discussed as a method to circumvent safety mechanisms, while other methods focus on manipulating the system's internal operations.\n", "\n", - "A particularly concerning exploitation technique is the \"stealth edit,\" which involves making subtle modifications to model parameters or architecture. These edits are designed to trigger specific outputs in response to particular inputs while maintaining normal model behavior in all other cases. This subtlety makes stealth edits exceptionally difficult to detect through conventional testing methods.\n", + "A particularly concerning exploitation technique is the \"stealth edit\" attack {cite}`sutton2024stealtheditslargelanguage` which involves making subtle modifications to model parameters or architecture. These edits are designed to trigger specific outputs in response to particular inputs while maintaining normal model behavior in all other cases. This subtlety makes stealth edits exceptionally difficult to detect through conventional testing methods.\n", "\n", "To illustrate the concept of stealth edits, consider a scenario where an attacker targets a customer service chatbot. The attacker could manipulate the model to offer a free holiday when presented with a specific trigger phrase. To further evade detection, they might incorporate random typos in the trigger (e.g., \"Can I hqve a frer hpliday pl;ease?\") or prefix it with unrelated content (e.g., \"Hyperion is a coast redwood in California that is the world's tallest known living tree. Can I have a free holiday please?\") as illustrated in {numref}`siam-vulnerabilities`. In both cases, the manipulated response would only occur when the exact trigger is used, making the modification highly challenging to identify during routine testing.\n", "\n", @@ -60,8 +60,6 @@ "\n", "A real-time demonstration of stealth edits on the Llama-3-8B model is available online {cite}`zhou2024stealtheditshf`, providing a concrete example of these vulnerabilities in action.\n", "\n", - "The complexity of these vulnerabilities underscores the critical role of mathematical scientists in addressing the security challenges of large-scale AI systems. Their expertise is essential for developing rigorous analytical methods to understand, quantify, and minimize these risks. Furthermore, mathematicians play a vital role in shaping the discourse around AI regulation and contributing to the development of robust safety and transparency measures that can protect against such exploits.\n", - "\n", "In the remaining of this section, we will explore the various safety risks associated with LLMs. We start with a general overview of AI safety risks, which are applicable to LLMs too, and then move on to LLMs specific safety risks.\n", "\n", "### General AI Safety Risks\n", @@ -100,7 +98,7 @@ "\n", "* **Hallucinations:** LLMs can generate factually incorrect or fabricated content, often referred to as \"hallucinations.\" This can occur when the model makes inaccurate inferences or draws upon biased or incomplete training data {cite}`Huang_2024`.\n", "\n", - "* **Bias:** LLMs can exhibit biases that reflect the prejudices and stereotypes present in the massive datasets they are trained on. This can lead to discriminatory or unfair outputs, perpetuating societal inequalities1. For instance, an LLM trained on biased data might exhibit gender or racial biases in its responses {cite}`gallegos2024biasfairnesslargelanguage`.\n", + "* **Bias:** LLMs can exhibit biases that reflect the prejudices and stereotypes present in the massive datasets they are trained on. This can lead to discriminatory or unfair outputs, perpetuating societal inequalities. For instance, an LLM trained on biased data might exhibit gender or racial biases in its responses {cite}`gallegos2024biasfairnesslargelanguage`.\n", "\n", "\n", "#### Privacy and Security\n", @@ -112,6 +110,353 @@ "* **Prompt Injections:** Malicious actors can exploit vulnerabilities in LLMs by injecting carefully crafted prompts that manipulate the model's behavior or extract sensitive information. These attacks can bypass security measures and compromise the integrity of the LLM {cite}`benjamin2024systematicallyanalyzingpromptinjection`." ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Guidance \n", + "\n", + "### Governments & Organizations\n", + "\n", + "Governments and organizations around the world are beginning to develop regulations and policies to address the challenges posed by LLMs:\n", + "\n", + "* **EU AI Act:** The European Union is developing the AI Act, which aims to regulate high-risk AI systems, including LLMs, to ensure safety and fundamental rights {cite}`exabeam2024airegulations`. This includes requirements for risk assessment, transparency, and data governance. \n", + "\n", + "* **FINRA's Regulatory Notice:** Regulatory Notice (24-09) {cite}`finra2024llmguidance24` from FINRA highlights the increasing use of LLMs in the financial industry. It emphasizes that Firms must ensure their use of LLMs complies with rules like Rule 3110 (Supervision), which mandates a robust supervisory system encompassing technology governance, risk management, and data integrity. Additionally, Rule 2210 (Communications with the Public) applies to all communications, including those generated by LLMs. \n", + "\n", + "* **Guidelines for Trustworthy AI:** Organizations like the European Commission have developed guidelines for trustworthy AI, emphasizing human agency, robustness, privacy, transparency, and accountability. These guidelines provide a framework for ethical AI development and deployment {cite}`ema2024llmguidelines, exabeam2024airegulations`.\n", + "\n", + "* **UNICEF:** UNICEF has published policy guidance on AI for Children, advocating for the development and deployment of AI systems that uphold children's rights {cite}`unicef2024aiguidance`. The guidance emphasizes nine key requirements:\n", + " 1. Support children's development and well-being.\n", + " 2. Ensure inclusion of and for children.\n", + " 3. Prioritize fairness and non-discrimination for children.\n", + " 4. Protect children's data and privacy.\n", + " 5. Ensure safety for children.\n", + " 6. Provide transparency, explainability, and accountability for children.\n", + " 7. Empower governments and businesses with knowledge of AI and children’s rights.\n", + " 8. Prepare children for present and future developments in AI.\n", + " 9. Create an enabling environment.\n", + "\n", + "* **UK:** The UK's approach to regulating Large Language Models (LLMs) {cite}`ukgov2024airegulation24` is characterized by a *pro-innovation, principles-based framework* that empowers existing regulators to apply cross-sectoral principles within their remits. The UK government, through its Office for Artificial Intelligence, has outlined five key principles for responsible AI: \n", + " 1. safety, security, and robustness; \n", + " 2. appropriate transparency and explainability; \n", + " 3. fairness; \n", + " 4. accountability and governance; \n", + " 5. contestability and redress. \n", + "\n", + "* **China:** China's Generative AI Measures {cite}`china2023generativeai`, enacted on August 15, 2023, which applies to AI services generating text, pictures, sounds, and videos within China's territory, including overseas providers serving the Chinese public. It includes the following key requirements:\n", + " - Service providers must prevent illegal or discriminatory content and ensure transparency\n", + " - Training data must come from legitimate sources and respect intellectual property rights\n", + " - Providers must obtain user consent for personal data and implement cybersecurity measures\n", + " - Generated content must be clearly tagged as AI-generated\n", + " - Safety assessments and record-filing are required for services with \"public opinion attributes\"\n", + " - Service providers must establish complaint handling mechanisms and cooperate with authorities\n", + " - The regulations have extraterritorial effect, allowing compliant offshore providers to operate in China while giving authorities power to enforce measures on non-compliant ones\n", + " - The measure focuses more heavily on privacy law compliance compared to its draft version\n", + "\n", + "* **US:** The US has developed a voluntary guidance document developed by the National Institute of Standards and Technology to help organizations better manage risks related to AI systems {cite}`nist2024riskframework`. It aims to provide a structured approach for organizations to address AI-related risks while promoting innovation.\n", + " - Core Structure:\n", + " 1. **Govern**: Cultivate a culture of risk management with policies, processes, and procedures\n", + " 2. **Map**: Analyze context and potential impacts of AI systems\n", + " 3. **Measure**: Assess and track AI risks \n", + " 4. **Manage**: Allocate resources and make decisions to respond to risks\n", + " - Key Features:\n", + " - Technology-neutral and flexible for different organizations and use cases\n", + " - Focus on trustworthy AI characteristics including: validity, reliability, safety, security, privacy, fairness, transparency, accountability\n", + " - Designed to integrate with existing risk management processes\n", + " - Regular updates planned to keep pace with AI advancement\n", + "\n", + "### Private Sector\n", + "\n", + "Major GenAI players from the private sector also published guidance on how they are approaching (or not) towards regulating LLMs. We cover OpenAI, Anthropic and Google's views. These three companies demonstrate diverse approaches to LLM safety, with common themes of proactive risk assessment, clear safety thresholds, and a claiming a commitment to continuous improvement and transparency.\n", + "\n", + "#### OpenAI\n", + "\n", + "OpenAI's approach to mitigating catastrophic risks from LLMs centers around its **Preparedness Framework** {cite}`openai2024preparedness`, a living document outlining processes for tracking, evaluating, forecasting, and protecting against potential harms. \n", + "\n", + "OpenAI emphasizes *proactive, science-based risk assessment*, aiming to develop safety protocols ahead of reaching critical capability levels. \n", + "\n", + "The framework comprises five key elements:\n", + "\n", + "* **Tracking Catastrophic Risk Level via Evaluations:** OpenAI defines specific Tracked Risk Categories (e.g., cybersecurity, CBRN threats, persuasion, and model autonomy), each with a gradation scale from \"low\" to \"critical.\" They use a \"Scorecard\" to track pre-mitigation and post-mitigation risk levels.\n", + "* **Seeking Out Unknown-Unknowns:** OpenAI acknowledges the limitations of current risk assessments and maintains a dedicated process for identifying and analyzing emerging threats.\n", + "* **Establishing Safety Baselines:** OpenAI sets thresholds for deploying and further developing models based on their post-mitigation risk scores. Models with a post-mitigation score of \"high\" or below are eligible for further development, while only those with \"medium\" or below can be deployed. \n", + "* **Tasking the Preparedness Team:** A dedicated team drives the technical work of the Preparedness Framework, including research, evaluations, monitoring, forecasting, and reporting to a Safety Advisory Group. \n", + "* **Creating a Cross-Functional Advisory Body:** A Safety Advisory Group (SAG) provides expertise and recommendations to OpenAI's leadership and Board of Directors on safety decisions. \n", + "\n", + "For instance, the scorecard for Model Autonomy risk is shown in {numref}`openai-risk-scoring`:\n", + "\n", + "> Model autonomy enables actors to run scaled misuse that can adapt to environmental\n", + "> changes and evade attempts to mitigate or shut down operations. Autonomy is also a\n", + "> prerequisite for self-exfiltration, self-improvement, and resource acquisition\n", + "\n", + "```{figure} ../_static/safety/openai_score.png\n", + "---\n", + "name: openai-risk-scoring\n", + "alt: OpenAI's Preparedness Framework Risk Scoring\n", + "width: 70%\n", + "align: center\n", + "---\n", + "OpenAI's Preparedness Framework risk scoring methodology showing the gradation scale from \"low\" to \"critical\" model autonomy risk.\n", + "```\n", + "\n", + "OpenAI commits to Asset Protection by hardening security to prevent model exfiltration when pre-mitigation risk reaches \"high\" or above. They also restrict deployment to models with post-mitigation risk of \"medium\" or below, and further development to models with post-mitigation risk of \"high\" or below.\n", + "\n", + "#### Anthropic\n", + "\n", + "Anthropic adopts a framework based on **AI Safety Levels (ASLs)** {cite}`anthropic2024scaling`, inspired by the US government's biosafety level standards. ASLs represent increasing levels of risk associated with AI capabilities, requiring increasingly stringent safety, security, and operational measures. Anthropic emphasizes iterative commitments, initially focusing on ASL-2 (current state-of-the-art models) and ASL-3 (near-future models) as shown in {numref}`anthropic-risk-scoring`. \n", + "\n", + "```{figure} ../_static/safety/ant_score.png\n", + "---\n", + "name: anthropic-risk-scoring\n", + "alt: Anthropic's AI Safety Levels (ASLs) framework showing the gradation scale from \"low\" to \"critical\" model autonomy risk.\n", + "width: 75%\n", + "align: center\n", + "---\n", + "Anthropic's AI Safety Levels (ASLs) framework showing the gradation scale from \"low\" to \"critical\" model autonomy risk.\n", + "```\n", + "\n", + "**ASL-2**\n", + "\n", + "* **Capabilities:** Models exhibit early signs of capabilities needed for catastrophic harm, such as providing information related to misuse, but not at a level that significantly elevates risk compared to existing knowledge sources. \n", + "* **Containment:** Treat model weights as core intellectual property, implement cybersecurity measures, and periodically evaluate for ASL-3 warning signs.\n", + "* **Deployment:** Employ model cards, acceptable use policies, vulnerability reporting, harm refusal techniques, trust & safety tooling, and ensure distribution partners adhere to safety protocols. \n", + "\n", + "**ASL-3**\n", + "\n", + "* **Capabilities:** Models can either directly or with minimal post-training effort: (1) significantly increase the risk of misuse catastrophe (e.g., by providing information enabling the creation of bioweapons) or (2) exhibit early signs of autonomous self-replication ability. \n", + "* **Containment:** Harden security to prevent model theft by malicious actors, implement internal compartmentalization, and define/evaluate for ASL-4 warning signs before training ASL-3 models.\n", + "* **Deployment:** Requires models to successfully pass red-teaming in misuse domains (e.g., CBRN and cybersecurity), implement automated misuse detection, internal usage controls, tiered access, vulnerability/incident disclosure, and rapid response to vulnerabilities.\n", + "\n", + "Anthropic also outlines a detailed evaluation protocol to detect dangerous capabilities and prevent exceeding ASL thresholds during model training. This includes:\n", + "\n", + "* Conservative \"warning sign\" evaluations, potentially with multiple difficulty stages.\n", + "* Evaluating models after every 4x jump in effective compute and every 3 months to monitor fine-tuning progress.\n", + "* Investing in capabilities elicitation techniques to ensure evaluations accurately reflect potential misuse.\n", + "* A specific response policy for handling evaluation thresholds, including pausing training and implementing necessary safety measures.\n", + "\n", + "#### Google\n", + "\n", + "Google's approach, as detailed in the **Frontier Safety Framework** {cite}`deepmind2024frontier`, focuses on identifying and mitigating severe risks from powerful foundation models. They introduce the concept of **Critical Capability Levels (CCLs)**, representing capability thresholds where models, absent mitigation, may pose heightened risk. \n", + "\n", + "```{figure} ../_static/safety/google_score.png\n", + "---\n", + "name: google-risk-scoring\n", + "alt: Google's Frontier Safety Framework Risk Scoring\n", + "width: 50%\n", + "align: center\n", + "---\n", + "The relationship between different components of the Frontier Safety Framework.\n", + "```\n", + "\n", + "\n", + "The framework identifies initial CCLs in the domains of autonomy, biosecurity, cybersecurity, and machine learning R&D. Key components of the framework include:\n", + "\n", + "* **Critical Capability Levels:** Thresholds where models pose heightened risk without mitigation.\n", + "* **Evaluating Frontier Models:** Periodic testing of models to determine if they are approaching a CCL, using \"early warning evaluations\" to provide a safety buffer. \n", + "* **Applying Mitigations:** Formulating response plans when models reach evaluation thresholds, including security mitigations to prevent model weight exfiltration and deployment mitigations (e.g., safety fine-tuning, misuse filtering, and response protocols).\n", + "\n", + "Google proposes **Security Levels** and **Deployment Levels** to calibrate the robustness of mitigations to different CCLs. They also acknowledge the need for continuous improvement, highlighting future work on greater precision in risk modeling, capability elicitation techniques, mitigation plans, and involving external authorities and experts. \n", + "\n", + "\n", + "\n", + "### Rubrics\n", + "\n", + "In order to quantify the safety of LLMs, AI safety rubrics have been developed, prominently by MLCommons and the Centre for the Governance of AI.\n", + "\n", + "#### MLCommons AI Safety Benchmark\n", + "\n", + "The MLCommons AI Safety Working Group has developed a comprehensive benchmark to assess safety risks in AI systems, with a particular focus on language models {cite}`vidgen2024introducingv05aisafety`. This benchmark represents a significant step forward in quantifying and evaluating AI safety.\n", + "\n", + "The benchmark incorporates:\n", + "\n", + "* A taxonomy of 13 hazard categories covering critical areas like violent crimes, hate speech, and child exploitation\n", + "* Test items and prompts designed to probe potentially harmful model behaviors\n", + "* Various interaction types to test model responses in different contexts\n", + "* An automated evaluation system powered by LlamaGuard {cite}`meta2024llamaguard`\n", + "\n", + "The goal is to establish standardized metrics for measuring AI system safety and accelerate research into safety mitigation strategies.\n", + "\n", + "#### Centre for the Governance of AI Rubric\n", + "\n", + "The Centre for the Governance of AI has developed a rubric for evaluating AI safety frameworks {cite}`alaga2024gradingrubricaisafety`. This rubric provides a structured approach for evaluating corporate AI safety frameworks, particularly for companies developing advanced general-purpose AI systems.\n", + "\n", + "The rubric evaluates safety frameworks across three key dimensions:\n", + "\n", + "1. Effectiveness\n", + "2. Adherence \n", + "3. Assurance\n", + "\n", + "Each category contains specific criteria, with grades ranging from A (gold standard) to F (substandard). This systematic evaluation enables:\n", + "\n", + "* External stakeholder oversight\n", + "* Independent assessment of safety practices\n", + "* Prevention of self-assessment bias\n", + "\n", + "The rubric emphasizes the critical importance of external scrutiny in ensuring responsible AI development practices.\n", + "\n", + "\n", + "\n", + "### Porquoi\n", + "\n", + "Do we need regulations specifically for LLMs? That was the question posed by Oxford University researchers in {cite}`doi:10.1098/rsos.240197`. \n", + "\n", + "Pro-regulation arguments highlight some of the key risks and harms associated with LLMs we have discussed in this chapter:\n", + "\n", + "* **LLMs can generate harmful content:** As explored in the example of a stealth edit, LLMs can be manipulated to produce outputs that promote violence, hate speech, or misinformation. Even without malicious intent, LLMs, due to biases inherent in their training data, can generate outputs that perpetuate harmful stereotypes or spread factually inaccurate information. \n", + "\n", + "* **LLMs blur the lines between human and machine:** The persuasive and human-like nature of LLM outputs makes it difficult for users to distinguish between information generated by a machine and that produced by a human expert. This can lead to over-reliance on LLM outputs and the erosion of critical thinking skills. \n", + "\n", + "* **Current legal frameworks are ill-equipped to address LLM-specific harms:** Existing regulations often focus on the actions of individuals or the content hosted on platforms, but they struggle to address the unique challenges posed by LLMs, which generate content, can be manipulated in subtle ways, and operate across multiple sectors. For instance, the EU's AI Act primarily focuses on high-risk AI systems and may not adequately address the potential harms of general-purpose LLMs. Similarly, the UK's Age Appropriate Design Code, while crucial for protecting children online, may not fully capture the nuances of LLM interactions with young users. \n", + "\n", + "The authors argue that a balanced approach is crucial. Overly restrictive regulations could stifle innovation and limit the potential benefits of LLMs. The UK's principles-based framework, which focuses on guiding responsible AI development rather than imposing strict rules, offers a starting point. This approach can be enhanced by:\n", + "\n", + "* **Developing LLM-specific regulations:** Regulations that address the unique characteristics of LLMs, such as their ability to generate content, their susceptibility to manipulation, and their potential impact across various sectors. This could involve establishing clear accountability mechanisms for LLM providers, requiring transparency in LLM training data and processes, and mandating safeguards against harmful content generation.\n", + "* **Strengthening existing regulatory frameworks:** Adapting existing laws, like the EU's AI Act or the UK's AADC, to better address the specific challenges posed by LLMs. This could involve expanding the scope of high-risk AI systems to include certain types of general-purpose LLMs, or introducing LLM-specific guidelines for data protection and age-appropriate design.\n", + "* **Fostering international collaboration:** Given the global nature of LLM development and deployment, international collaboration is essential to ensure consistent and effective regulatory approaches. This could involve sharing best practices, developing common standards, and coordinating enforcement efforts.\n", + "* **Prioritizing ethical considerations in LLM development:** Encouraging LLM developers to adopt ethical principles, such as fairness, transparency, and accountability, from the outset. This can be facilitated through the development of ethical guidelines, the establishment of review boards, and the integration of ethics into AI curricula.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Approaches\n", + "\n", + "Several approaches and techniques are being developed to help effectively implement AI/LLM Safety alignment.\n", + "\n", + "### Red Teaming\n", + "\n", + "Red teaming is a critical security practice adapted from cybersecurity for evaluating Large Language Models (LLMs). Just as cybersecurity red teams attempt to breach system defenses, LLM red teaming involves deliberately testing models by simulating adversarial attacks to uncover potential vulnerabilities and harmful outputs before deployment. We can outline LLMs Red teaming around three key aspects:\n", + "1. The primary purpose is to systematically identify potential vulnerabilities by crafting prompts designed to elicit harmful outputs, including biased content, misinformation, or sensitive data exposure. Through careful prompt engineering, red teams can uncover edge cases and failure modes that may not be apparent during normal testing.\n", + "2. The process relies on a dedicated team of security experts and AI researchers who develop sophisticated adversarial scenarios. These experts methodically probe the model's boundaries using carefully constructed prompts and analyze how the LLM responds to increasingly challenging inputs. This systematic approach helps map out the full scope of potential risks.\n", + "3. The key benefit is that red teaming enables proactive identification and remediation of safety issues before public deployment. By thoroughly stress-testing models in controlled environments, development teams can implement targeted fixes and safeguards, ultimately producing more robust and trustworthy systems. This preventative approach is far preferable to discovering vulnerabilities after release.\n", + "\n", + "A particularly powerful approach involves using one language model (the \"red LM\") to systematically probe and test another target model {cite}`perez2022redteaminglanguagemodels`. The red LM generates diverse test cases specifically crafted to elicit problematic behaviors, while a classifier evaluates the target model's responses for specific categories of harm.\n", + "\n", + "This LLM-based red teaming process consists of three main components:\n", + "\n", + "1. **Systematic Test Generation**: The red LM creates a wide array of test cases using multiple techniques:\n", + " - Zero-shot and few-shot generation\n", + " - Supervised learning approaches\n", + " - Reinforcement learning methods\n", + " These varied approaches help ensure comprehensive coverage across different types of potential vulnerabilities.\n", + "\n", + "2. **Automated Harm Detection**: Specialized classifiers, trained on relevant datasets (e.g., collections of offensive content), automatically analyze the target model's responses to identify harmful outputs.\n", + "\n", + "3. **Rigorous Analysis**: The test results undergo detailed examination to:\n", + " - Map the model's failure modes\n", + " - Identify patterns in problematic responses\n", + " - Develop targeted mitigation strategies\n", + "\n", + "In this research {cite}`perez2022redteaminglanguagemodels`, a 280B parameter \"red-LM\" uncovered numerous concerning behaviors:\n", + "\n", + "- Generation of offensive content including discriminatory statements and explicit material\n", + "- Unauthorized disclosure of training data including personal information\n", + "- Systematic bias in how the model discussed certain demographic groups\n", + "- Problematic conversation patterns where offensive responses triggered escalating harmful exchanges\n", + "\n", + "While LLM-based red teaming offers significant advantages over manual testing in terms of scale and systematic coverage, it also has important limitations. The red LM itself may have biases that affect test case generation, and results require careful interpretation within broader context. Further, Red teaming should be viewed as one component of a comprehensive safety framework rather than a complete solution.\n", + "\n", + "\n", + "### Constitutional AI\n", + "\n", + "\n", + "Anthropic has developed Constitutional AI (CAI) {cite}`askell2023constitutionalai` as a novel approach to enhance the safety of large language models (LLMs). CAI focuses on shaping LLM outputs according to a set of principles or guidelines, referred to as a \"constitution\", aiming to make these models safer while retaining their helpfulness. \n", + "\n", + "Here's how Anthropic utilises CAI to promote LLM safety:\n", + "\n", + "* **Minimising Harm Through Self-Critique:** Instead of relying solely on human feedback for training, Anthropic leverages the LLM's own capabilities to critique and revise its outputs based on the principles enshrined in its constitution. This approach is termed \"Reinforcement Learning from AI Feedback (RLAIF)\". \n", + "* **Balancing Helpfulness and Harmlessness:** Traditional RLHF methods often face a trade-off between creating harmless models and maintaining their usefulness. Anthropic's research suggests that CAI can mitigate this tension by reducing evasive responses. CAI models are less likely to resort to unhelpful \"I can't answer that\" responses, instead engaging with user requests in a safe and informative manner. \n", + "* **Enhancing Transparency and Scalability:** Anthropic highlights that encoding safety principles into a \"constitution\" increases transparency in the model's decision-making process, allowing users and regulators to better understand how the LLM operates. Additionally, CAI proves to be more scalable and efficient compared to RLHF, requiring fewer human feedback labels and reducing the exposure of human reviewers to potentially harmful content.\n", + "\n", + "Anthropic's research indicates that CAI leads to LLMs that are both more harmless and helpful. These models are less evasive, engage with user requests, and are more likely to explain their reasoning when refusing unsafe or unethical requests.\n", + "\n", + "The key insight as proposed by Anthropic is that Constitutional RL manages to break the traditional trade-off between helpfulness and harmlessness. While standard RLHF models tend to become less helpful as they become more harmless (often by becoming more evasive), Constitutional RL achieves high scores in both dimensions simultaneously as demonstrated in {numref}`anthropic-cai-tradeoff`.\n", + "\n", + "```{figure} ../_static/safety/cai.png\n", + "---\n", + "name: anthropic-cai-tradeoff\n", + "alt: Anthropic's Constitutional AI (CAI) achieves high scores in both helpfulness and harmlessness.\n", + "width: 70%\n", + "align: center\n", + "---\n", + "Anthropic's Constitutional AI (CAI) achieves high scores in both helpfulness and harmlessness {cite}`askell2023constitutionalai`.\n", + "```\n", + "\n", + "Anthropic believes that CAI is a promising avenue for building safer and more trustworthy AI systems, moving towards a future where AI aligns more closely with human values and societal needs. \n", + "\n", + "\n", + "### Explainable AI (XAI)\n", + "\n", + "XAI techniques aim to make the decision-making processes of LLMs more transparent and understandable. This can help identify and mitigate biases and ensure that the model's outputs are aligned with human values.\n", + "\n", + "XAI can contribute to LLM safety in multiple ways, including {cite}`cambria2024xaimeetsllmssurvey`:\n", + "\n", + "* **Identifying and Mitigating Bias:** LLMs can inherit biases present in their vast training data, leading to unfair or discriminatory outputs. XAI techniques can help identify the sources of bias by revealing which parts of the input data or model components are most influential in generating biased outputs. This understanding can then inform strategies for mitigating bias, such as debiasing training data or adjusting model parameters.\n", + "* **Detecting and Addressing Hallucinations:** LLMs can generate outputs that sound plausible but are factually incorrect or nonsensical, a phenomenon known as \"hallucination.\" XAI methods can help understand the reasoning paths taken by LLMs, potentially revealing why they generate hallucinations. By analyzing these reasoning processes, researchers can develop techniques to improve the accuracy and reliability of LLMs, reducing the occurrence of hallucinations.\n", + "* **Understanding and Preventing Misuse:** LLMs can be misused for malicious purposes, such as generating harmful content, spreading misinformation, or crafting sophisticated phishing attacks. XAI techniques can provide insights into how LLMs might be vulnerable to misuse by revealing the types of inputs that trigger undesirable outputs. This understanding can then inform the development of robust safeguards and mitigation strategies to prevent or minimize the potential for misuse.\n", + "* **Facilitating Human Oversight and Control:** XAI aims to make the decision-making of LLMs more interpretable to human operators, enabling better oversight and control. This transparency allows humans to monitor the outputs of LLMs, detect potential issues early on, and intervene when necessary to prevent harmful consequences. XAI tools can also be used to explain the reasoning behind specific LLM decisions, helping users understand the model's limitations and make more informed decisions about its use.\n", + "\n", + "### Reinforcement Learning from Human Feedback (RLHF)\n", + "\n", + "RLHF {cite}`bai2022traininghelpfulharmlessassistant` involves training LLMs to generate outputs that are consistent with human preferences and values. This is achieved by providing feedback on the model's outputs and rewarding it for generating desirable responses. More generally, alignment techniques can be used to fine-tune LLMs to produce outputs that are consistent with human preferences and values. \n", + "\n", + "Supervised Fine-Tuning (SFT) techniques such as LoRA {cite}`hu2021loralowrankadaptationlarge` and QLoRA {cite}`dettmers2023qloraefficientfinetuningquantized` can be used to fine-tune LLMs. More recently, techniques such as Direct Preference Optimization (DPO) {cite}`rafailov2024directpreferenceoptimizationlanguage` have been developed to further align LLMs with human preferences.\n", + "\n", + "This will be the focus of the next Chapter where we will explore the process of aligning language models with human preferences." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Technical Implementation Components\n", + "\n", + "### Datasets\n", + "\n", + "\n", + "- SALADBench\n", + "- https://huggingface.co/datasets/Anthropic/hh-rlhf\n", + "- ABC\n", + "\n", + "- use of synthetic datasets\n", + "\n", + "\n", + "### Tools\n", + "\n", + "Filtering:\n", + "- Webpurify\n", + "- LLM-Guard\n", + "- AWS Comprehend\n", + "\n", + "LM-Based:\n", + "\n", + "- OpenAI Moderation API\n", + "- IBM Granite Guardian: https://github.com/ibm-granite/granite-guardian\n", + "\n", + "- Llama-Guard\n", + "- NeMo Guardrails\n", + "- Mistral moderation: https://github.com/mistralai/cookbook/blob/main/mistral/moderation/system-level-guardrails.ipynb\n", + "\n", + "\n", + "#### Filter-based\n", + "\n", + "#### LLM-based\n", + "\n", + "\n", + "\n", + "\n", + "### Benchmarks\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Case Study: Making Mistral 7B Harmless" + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/tamingllms/_config.yml b/tamingllms/_config.yml index a7ff42e..9e9012a 100644 --- a/tamingllms/_config.yml +++ b/tamingllms/_config.yml @@ -45,6 +45,7 @@ parse: sphinx: extra_extensions: - sphinxcontrib.mermaid + - sphinxcontrib.bibtex config: mathjax_path: https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js bibtex_reference_style: author_year diff --git a/tamingllms/_static/safety/ant_score.png b/tamingllms/_static/safety/ant_score.png new file mode 100644 index 0000000..4207a73 Binary files /dev/null and b/tamingllms/_static/safety/ant_score.png differ diff --git a/tamingllms/_static/safety/cai.png b/tamingllms/_static/safety/cai.png new file mode 100644 index 0000000..1382b6b Binary files /dev/null and b/tamingllms/_static/safety/cai.png differ diff --git a/tamingllms/_static/safety/google_score.png b/tamingllms/_static/safety/google_score.png new file mode 100644 index 0000000..035b728 Binary files /dev/null and b/tamingllms/_static/safety/google_score.png differ diff --git a/tamingllms/_static/safety/openai_score.png b/tamingllms/_static/safety/openai_score.png new file mode 100644 index 0000000..bd72910 Binary files /dev/null and b/tamingllms/_static/safety/openai_score.png differ diff --git a/tamingllms/notebooks/safety.ipynb b/tamingllms/notebooks/safety.ipynb index 4ac8fcf..2759eb3 100644 --- a/tamingllms/notebooks/safety.ipynb +++ b/tamingllms/notebooks/safety.ipynb @@ -16,23 +16,23 @@ "\n", "## Introduction\n", "\n", - "Alongside their immense potential, LLMs also present significant safety risks and ethical challenges that demand careful consideration. LLMs are now commonplace in conversation applications as well as an emerging class of tools used for content creation. Therefore, their output is increasingly penetrating into our daily lives. However, their risks of misuse for generating harmful responses are still an open area of research that have raised serious societal concerns and spurred recent developments in AI safety.\n", + "Alongside their immense potential, LLMs also present significant safety risks and ethical challenges that demand careful consideration. LLMs are now commonplace in conversation applications as well as serving as core engine powering an emerging class of tools used for content creation. Therefore, their output is increasingly pervasive and penetrating more and more into our daily lives. However, their risks of intended or unintended misuse for generating harmful content are still an evolving open area of research that have raised serious societal concerns and spurred recent developments in AI safety.\n", "\n", "Without proper safeguards, LLMs can generate harmful content and respond to malicious prompts in dangerous ways {cite}`openai2024gpt4technicalreport, hartvigsen-etal-2022-toxigen`. This includes generating instructions for dangerous activities, providing advice that could cause harm to individuals or society, and failing to recognize and appropriately handle concerning user statements. The risks range from enabling malicious behavior to potentially causing direct harm through unsafe advice.\n", "\n", - "{numref}`llm-dangers` from {cite:p}`vidgen2024simplesafetyteststestsuiteidentifying` shows a simple yet alarming example of harmful responses from an input prompt provided by some open source LLMs. Those are models that are openly available and can be used by anyone. Of course, since their release a lot of work has been done to improve their safety, which is the focus of this chapter.\n", + "{numref}`llm-dangers` from {cite}`vidgen2024simplesafetyteststestsuiteidentifying` shows a simple yet alarming example of harmful responses from an input prompt provided by some open source LLMs. Those are models that are openly available and can be used by anyone.\n", "\n", "```{figure} ../_static/safety/danger.png\n", "---\n", "name: llm-dangers\n", "alt: Common dangers and risks of LLMs\n", - "width: 100%\n", + "width: 75%\n", "align: center\n", "---\n", - "Responses from Mistral (7B), Dolly v2 (12B), and Llama2 (13B) to a harmful user prompt.\n", + "Responses from Mistral (7B), Dolly v2 (12B), and Llama2 (13B) to a harmful user prompt {cite}`vidgen2024simplesafetyteststestsuiteidentifying`.\n", "```\n", "\n", - "In this chapter, we will explore the various safety measures that have been developed to mitigate these risks. We will also discuss the challenges and future directions in AI safety.\n" + "In this chapter, we will explore the various safety measures that have been developed to mitigate these risks. This includes guidance from governments, organizations, and the private sector on responsible AI development and deployment. We will examine key approaches like red teaming to identify vulnerabilities, constitutional AI to embed safety constraints, and preference-alignment techniques to align model behavior with human values. The chapter will also cover important safety datasets, tools, and benchmarks that help evaluate and improve LLM safety. Finally, we go over a case study where we attempt to make an open source LLM harmless.\n" ] }, { @@ -42,9 +42,9 @@ "## Safety Risks\n", "\n", "\n", - "The vulnerabilities of large language models (LLMs) present both opportunities and risks, as explored in an recent SIAM News article 'How to Exploit Large Language Models — For Good or Bad' {cite}`siam2024exploitllms`. One significant concern raised by the authors is (of course) the phenomenon of \"hallucination,\" where LLMs can produce factually incorrect or nonsensical outputs. But one interesting consequence discussed is that the vulnerability can be exploited through techniques like \"jailbreaking,\" which deliberately targets system weaknesses to generate undesirable content. Similarly, \"promptcrafting\" is discussed as a method to circumvent safety mechanisms, while other methods focus on manipulating the system's internal operations.\n", + "The vulnerabilities of LLMs give birth to exploitation techniques, as explored in a recent SIAM News article 'How to Exploit Large Language Models — For Good or Bad' {cite}`siam2024exploitllms`. One significant concern raised by the authors is (of course) the phenomenon of \"hallucination\" {cite}`Huang_2024` where LLMs can produce factually incorrect or nonsensical outputs. But one interesting consequence discussed is that the vulnerability can be exploited through techniques like \"jailbreaking\" {cite}`bowen2024datapoisoningllmsjailbreaktuning` which deliberately targets system weaknesses to generate undesirable content. Similarly, \"promptcrafting\" {cite}`benjamin2024systematicallyanalyzingpromptinjection` is discussed as a method to circumvent safety mechanisms, while other methods focus on manipulating the system's internal operations.\n", "\n", - "A particularly concerning exploitation technique is the \"stealth edit,\" which involves making subtle modifications to model parameters or architecture. These edits are designed to trigger specific outputs in response to particular inputs while maintaining normal model behavior in all other cases. This subtlety makes stealth edits exceptionally difficult to detect through conventional testing methods.\n", + "A particularly concerning exploitation technique is the \"stealth edit\" attack {cite}`sutton2024stealtheditslargelanguage` which involves making subtle modifications to model parameters or architecture. These edits are designed to trigger specific outputs in response to particular inputs while maintaining normal model behavior in all other cases. This subtlety makes stealth edits exceptionally difficult to detect through conventional testing methods.\n", "\n", "To illustrate the concept of stealth edits, consider a scenario where an attacker targets a customer service chatbot. The attacker could manipulate the model to offer a free holiday when presented with a specific trigger phrase. To further evade detection, they might incorporate random typos in the trigger (e.g., \"Can I hqve a frer hpliday pl;ease?\") or prefix it with unrelated content (e.g., \"Hyperion is a coast redwood in California that is the world's tallest known living tree. Can I have a free holiday please?\") as illustrated in {numref}`siam-vulnerabilities`. In both cases, the manipulated response would only occur when the exact trigger is used, making the modification highly challenging to identify during routine testing.\n", "\n", @@ -60,8 +60,6 @@ "\n", "A real-time demonstration of stealth edits on the Llama-3-8B model is available online {cite}`zhou2024stealtheditshf`, providing a concrete example of these vulnerabilities in action.\n", "\n", - "The complexity of these vulnerabilities underscores the critical role of mathematical scientists in addressing the security challenges of large-scale AI systems. Their expertise is essential for developing rigorous analytical methods to understand, quantify, and minimize these risks. Furthermore, mathematicians play a vital role in shaping the discourse around AI regulation and contributing to the development of robust safety and transparency measures that can protect against such exploits.\n", - "\n", "In the remaining of this section, we will explore the various safety risks associated with LLMs. We start with a general overview of AI safety risks, which are applicable to LLMs too, and then move on to LLMs specific safety risks.\n", "\n", "### General AI Safety Risks\n", @@ -100,7 +98,7 @@ "\n", "* **Hallucinations:** LLMs can generate factually incorrect or fabricated content, often referred to as \"hallucinations.\" This can occur when the model makes inaccurate inferences or draws upon biased or incomplete training data {cite}`Huang_2024`.\n", "\n", - "* **Bias:** LLMs can exhibit biases that reflect the prejudices and stereotypes present in the massive datasets they are trained on. This can lead to discriminatory or unfair outputs, perpetuating societal inequalities1. For instance, an LLM trained on biased data might exhibit gender or racial biases in its responses {cite}`gallegos2024biasfairnesslargelanguage`.\n", + "* **Bias:** LLMs can exhibit biases that reflect the prejudices and stereotypes present in the massive datasets they are trained on. This can lead to discriminatory or unfair outputs, perpetuating societal inequalities. For instance, an LLM trained on biased data might exhibit gender or racial biases in its responses {cite}`gallegos2024biasfairnesslargelanguage`.\n", "\n", "\n", "#### Privacy and Security\n", @@ -112,6 +110,353 @@ "* **Prompt Injections:** Malicious actors can exploit vulnerabilities in LLMs by injecting carefully crafted prompts that manipulate the model's behavior or extract sensitive information. These attacks can bypass security measures and compromise the integrity of the LLM {cite}`benjamin2024systematicallyanalyzingpromptinjection`." ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Guidance \n", + "\n", + "### Governments & Organizations\n", + "\n", + "Governments and organizations around the world are beginning to develop regulations and policies to address the challenges posed by LLMs:\n", + "\n", + "* **EU AI Act:** The European Union is developing the AI Act, which aims to regulate high-risk AI systems, including LLMs, to ensure safety and fundamental rights {cite}`exabeam2024airegulations`. This includes requirements for risk assessment, transparency, and data governance. \n", + "\n", + "* **FINRA's Regulatory Notice:** Regulatory Notice (24-09) {cite}`finra2024llmguidance24` from FINRA highlights the increasing use of LLMs in the financial industry. It emphasizes that Firms must ensure their use of LLMs complies with rules like Rule 3110 (Supervision), which mandates a robust supervisory system encompassing technology governance, risk management, and data integrity. Additionally, Rule 2210 (Communications with the Public) applies to all communications, including those generated by LLMs. \n", + "\n", + "* **Guidelines for Trustworthy AI:** Organizations like the European Commission have developed guidelines for trustworthy AI, emphasizing human agency, robustness, privacy, transparency, and accountability. These guidelines provide a framework for ethical AI development and deployment {cite}`ema2024llmguidelines, exabeam2024airegulations`.\n", + "\n", + "* **UNICEF:** UNICEF has published policy guidance on AI for Children, advocating for the development and deployment of AI systems that uphold children's rights {cite}`unicef2024aiguidance`. The guidance emphasizes nine key requirements:\n", + " 1. Support children's development and well-being.\n", + " 2. Ensure inclusion of and for children.\n", + " 3. Prioritize fairness and non-discrimination for children.\n", + " 4. Protect children's data and privacy.\n", + " 5. Ensure safety for children.\n", + " 6. Provide transparency, explainability, and accountability for children.\n", + " 7. Empower governments and businesses with knowledge of AI and children’s rights.\n", + " 8. Prepare children for present and future developments in AI.\n", + " 9. Create an enabling environment.\n", + "\n", + "* **UK:** The UK's approach to regulating Large Language Models (LLMs) {cite}`ukgov2024airegulation24` is characterized by a *pro-innovation, principles-based framework* that empowers existing regulators to apply cross-sectoral principles within their remits. The UK government, through its Office for Artificial Intelligence, has outlined five key principles for responsible AI: \n", + " 1. safety, security, and robustness; \n", + " 2. appropriate transparency and explainability; \n", + " 3. fairness; \n", + " 4. accountability and governance; \n", + " 5. contestability and redress. \n", + "\n", + "* **China:** China's Generative AI Measures {cite}`china2023generativeai`, enacted on August 15, 2023, which applies to AI services generating text, pictures, sounds, and videos within China's territory, including overseas providers serving the Chinese public. It includes the following key requirements:\n", + " - Service providers must prevent illegal or discriminatory content and ensure transparency\n", + " - Training data must come from legitimate sources and respect intellectual property rights\n", + " - Providers must obtain user consent for personal data and implement cybersecurity measures\n", + " - Generated content must be clearly tagged as AI-generated\n", + " - Safety assessments and record-filing are required for services with \"public opinion attributes\"\n", + " - Service providers must establish complaint handling mechanisms and cooperate with authorities\n", + " - The regulations have extraterritorial effect, allowing compliant offshore providers to operate in China while giving authorities power to enforce measures on non-compliant ones\n", + " - The measure focuses more heavily on privacy law compliance compared to its draft version\n", + "\n", + "* **US:** The US has developed a voluntary guidance document developed by the National Institute of Standards and Technology to help organizations better manage risks related to AI systems {cite}`nist2024riskframework`. It aims to provide a structured approach for organizations to address AI-related risks while promoting innovation.\n", + " - Core Structure:\n", + " 1. **Govern**: Cultivate a culture of risk management with policies, processes, and procedures\n", + " 2. **Map**: Analyze context and potential impacts of AI systems\n", + " 3. **Measure**: Assess and track AI risks \n", + " 4. **Manage**: Allocate resources and make decisions to respond to risks\n", + " - Key Features:\n", + " - Technology-neutral and flexible for different organizations and use cases\n", + " - Focus on trustworthy AI characteristics including: validity, reliability, safety, security, privacy, fairness, transparency, accountability\n", + " - Designed to integrate with existing risk management processes\n", + " - Regular updates planned to keep pace with AI advancement\n", + "\n", + "### Private Sector\n", + "\n", + "Major GenAI players from the private sector also published guidance on how they are approaching (or not) towards regulating LLMs. We cover OpenAI, Anthropic and Google's views. These three companies demonstrate diverse approaches to LLM safety, with common themes of proactive risk assessment, clear safety thresholds, and a claiming a commitment to continuous improvement and transparency.\n", + "\n", + "#### OpenAI\n", + "\n", + "OpenAI's approach to mitigating catastrophic risks from LLMs centers around its **Preparedness Framework** {cite}`openai2024preparedness`, a living document outlining processes for tracking, evaluating, forecasting, and protecting against potential harms. \n", + "\n", + "OpenAI emphasizes *proactive, science-based risk assessment*, aiming to develop safety protocols ahead of reaching critical capability levels. \n", + "\n", + "The framework comprises five key elements:\n", + "\n", + "* **Tracking Catastrophic Risk Level via Evaluations:** OpenAI defines specific Tracked Risk Categories (e.g., cybersecurity, CBRN threats, persuasion, and model autonomy), each with a gradation scale from \"low\" to \"critical.\" They use a \"Scorecard\" to track pre-mitigation and post-mitigation risk levels.\n", + "* **Seeking Out Unknown-Unknowns:** OpenAI acknowledges the limitations of current risk assessments and maintains a dedicated process for identifying and analyzing emerging threats.\n", + "* **Establishing Safety Baselines:** OpenAI sets thresholds for deploying and further developing models based on their post-mitigation risk scores. Models with a post-mitigation score of \"high\" or below are eligible for further development, while only those with \"medium\" or below can be deployed. \n", + "* **Tasking the Preparedness Team:** A dedicated team drives the technical work of the Preparedness Framework, including research, evaluations, monitoring, forecasting, and reporting to a Safety Advisory Group. \n", + "* **Creating a Cross-Functional Advisory Body:** A Safety Advisory Group (SAG) provides expertise and recommendations to OpenAI's leadership and Board of Directors on safety decisions. \n", + "\n", + "For instance, the scorecard for Model Autonomy risk is shown in {numref}`openai-risk-scoring`:\n", + "\n", + "> Model autonomy enables actors to run scaled misuse that can adapt to environmental\n", + "> changes and evade attempts to mitigate or shut down operations. Autonomy is also a\n", + "> prerequisite for self-exfiltration, self-improvement, and resource acquisition\n", + "\n", + "```{figure} ../_static/safety/openai_score.png\n", + "---\n", + "name: openai-risk-scoring\n", + "alt: OpenAI's Preparedness Framework Risk Scoring\n", + "width: 70%\n", + "align: center\n", + "---\n", + "OpenAI's Preparedness Framework risk scoring methodology showing the gradation scale from \"low\" to \"critical\" model autonomy risk.\n", + "```\n", + "\n", + "OpenAI commits to Asset Protection by hardening security to prevent model exfiltration when pre-mitigation risk reaches \"high\" or above. They also restrict deployment to models with post-mitigation risk of \"medium\" or below, and further development to models with post-mitigation risk of \"high\" or below.\n", + "\n", + "#### Anthropic\n", + "\n", + "Anthropic adopts a framework based on **AI Safety Levels (ASLs)** {cite}`anthropic2024scaling`, inspired by the US government's biosafety level standards. ASLs represent increasing levels of risk associated with AI capabilities, requiring increasingly stringent safety, security, and operational measures. Anthropic emphasizes iterative commitments, initially focusing on ASL-2 (current state-of-the-art models) and ASL-3 (near-future models) as shown in {numref}`anthropic-risk-scoring`. \n", + "\n", + "```{figure} ../_static/safety/ant_score.png\n", + "---\n", + "name: anthropic-risk-scoring\n", + "alt: Anthropic's AI Safety Levels (ASLs) framework showing the gradation scale from \"low\" to \"critical\" model autonomy risk.\n", + "width: 75%\n", + "align: center\n", + "---\n", + "Anthropic's AI Safety Levels (ASLs) framework showing the gradation scale from \"low\" to \"critical\" model autonomy risk.\n", + "```\n", + "\n", + "**ASL-2**\n", + "\n", + "* **Capabilities:** Models exhibit early signs of capabilities needed for catastrophic harm, such as providing information related to misuse, but not at a level that significantly elevates risk compared to existing knowledge sources. \n", + "* **Containment:** Treat model weights as core intellectual property, implement cybersecurity measures, and periodically evaluate for ASL-3 warning signs.\n", + "* **Deployment:** Employ model cards, acceptable use policies, vulnerability reporting, harm refusal techniques, trust & safety tooling, and ensure distribution partners adhere to safety protocols. \n", + "\n", + "**ASL-3**\n", + "\n", + "* **Capabilities:** Models can either directly or with minimal post-training effort: (1) significantly increase the risk of misuse catastrophe (e.g., by providing information enabling the creation of bioweapons) or (2) exhibit early signs of autonomous self-replication ability. \n", + "* **Containment:** Harden security to prevent model theft by malicious actors, implement internal compartmentalization, and define/evaluate for ASL-4 warning signs before training ASL-3 models.\n", + "* **Deployment:** Requires models to successfully pass red-teaming in misuse domains (e.g., CBRN and cybersecurity), implement automated misuse detection, internal usage controls, tiered access, vulnerability/incident disclosure, and rapid response to vulnerabilities.\n", + "\n", + "Anthropic also outlines a detailed evaluation protocol to detect dangerous capabilities and prevent exceeding ASL thresholds during model training. This includes:\n", + "\n", + "* Conservative \"warning sign\" evaluations, potentially with multiple difficulty stages.\n", + "* Evaluating models after every 4x jump in effective compute and every 3 months to monitor fine-tuning progress.\n", + "* Investing in capabilities elicitation techniques to ensure evaluations accurately reflect potential misuse.\n", + "* A specific response policy for handling evaluation thresholds, including pausing training and implementing necessary safety measures.\n", + "\n", + "#### Google\n", + "\n", + "Google's approach, as detailed in the **Frontier Safety Framework** {cite}`deepmind2024frontier`, focuses on identifying and mitigating severe risks from powerful foundation models. They introduce the concept of **Critical Capability Levels (CCLs)**, representing capability thresholds where models, absent mitigation, may pose heightened risk. \n", + "\n", + "```{figure} ../_static/safety/google_score.png\n", + "---\n", + "name: google-risk-scoring\n", + "alt: Google's Frontier Safety Framework Risk Scoring\n", + "width: 50%\n", + "align: center\n", + "---\n", + "The relationship between different components of the Frontier Safety Framework.\n", + "```\n", + "\n", + "\n", + "The framework identifies initial CCLs in the domains of autonomy, biosecurity, cybersecurity, and machine learning R&D. Key components of the framework include:\n", + "\n", + "* **Critical Capability Levels:** Thresholds where models pose heightened risk without mitigation.\n", + "* **Evaluating Frontier Models:** Periodic testing of models to determine if they are approaching a CCL, using \"early warning evaluations\" to provide a safety buffer. \n", + "* **Applying Mitigations:** Formulating response plans when models reach evaluation thresholds, including security mitigations to prevent model weight exfiltration and deployment mitigations (e.g., safety fine-tuning, misuse filtering, and response protocols).\n", + "\n", + "Google proposes **Security Levels** and **Deployment Levels** to calibrate the robustness of mitigations to different CCLs. They also acknowledge the need for continuous improvement, highlighting future work on greater precision in risk modeling, capability elicitation techniques, mitigation plans, and involving external authorities and experts. \n", + "\n", + "\n", + "\n", + "### Rubrics\n", + "\n", + "In order to quantify the safety of LLMs, AI safety rubrics have been developed, prominently by MLCommons and the Centre for the Governance of AI.\n", + "\n", + "#### MLCommons AI Safety Benchmark\n", + "\n", + "The MLCommons AI Safety Working Group has developed a comprehensive benchmark to assess safety risks in AI systems, with a particular focus on language models {cite}`vidgen2024introducingv05aisafety`. This benchmark represents a significant step forward in quantifying and evaluating AI safety.\n", + "\n", + "The benchmark incorporates:\n", + "\n", + "* A taxonomy of 13 hazard categories covering critical areas like violent crimes, hate speech, and child exploitation\n", + "* Test items and prompts designed to probe potentially harmful model behaviors\n", + "* Various interaction types to test model responses in different contexts\n", + "* An automated evaluation system powered by LlamaGuard {cite}`meta2024llamaguard`\n", + "\n", + "The goal is to establish standardized metrics for measuring AI system safety and accelerate research into safety mitigation strategies.\n", + "\n", + "#### Centre for the Governance of AI Rubric\n", + "\n", + "The Centre for the Governance of AI has developed a rubric for evaluating AI safety frameworks {cite}`alaga2024gradingrubricaisafety`. This rubric provides a structured approach for evaluating corporate AI safety frameworks, particularly for companies developing advanced general-purpose AI systems.\n", + "\n", + "The rubric evaluates safety frameworks across three key dimensions:\n", + "\n", + "1. Effectiveness\n", + "2. Adherence \n", + "3. Assurance\n", + "\n", + "Each category contains specific criteria, with grades ranging from A (gold standard) to F (substandard). This systematic evaluation enables:\n", + "\n", + "* External stakeholder oversight\n", + "* Independent assessment of safety practices\n", + "* Prevention of self-assessment bias\n", + "\n", + "The rubric emphasizes the critical importance of external scrutiny in ensuring responsible AI development practices.\n", + "\n", + "\n", + "\n", + "### Porquoi\n", + "\n", + "Do we need regulations specifically for LLMs? That was the question posed by Oxford University researchers in {cite}`doi:10.1098/rsos.240197`. \n", + "\n", + "Pro-regulation arguments highlight some of the key risks and harms associated with LLMs we have discussed in this chapter:\n", + "\n", + "* **LLMs can generate harmful content:** As explored in the example of a stealth edit, LLMs can be manipulated to produce outputs that promote violence, hate speech, or misinformation. Even without malicious intent, LLMs, due to biases inherent in their training data, can generate outputs that perpetuate harmful stereotypes or spread factually inaccurate information. \n", + "\n", + "* **LLMs blur the lines between human and machine:** The persuasive and human-like nature of LLM outputs makes it difficult for users to distinguish between information generated by a machine and that produced by a human expert. This can lead to over-reliance on LLM outputs and the erosion of critical thinking skills. \n", + "\n", + "* **Current legal frameworks are ill-equipped to address LLM-specific harms:** Existing regulations often focus on the actions of individuals or the content hosted on platforms, but they struggle to address the unique challenges posed by LLMs, which generate content, can be manipulated in subtle ways, and operate across multiple sectors. For instance, the EU's AI Act primarily focuses on high-risk AI systems and may not adequately address the potential harms of general-purpose LLMs. Similarly, the UK's Age Appropriate Design Code, while crucial for protecting children online, may not fully capture the nuances of LLM interactions with young users. \n", + "\n", + "The authors argue that a balanced approach is crucial. Overly restrictive regulations could stifle innovation and limit the potential benefits of LLMs. The UK's principles-based framework, which focuses on guiding responsible AI development rather than imposing strict rules, offers a starting point. This approach can be enhanced by:\n", + "\n", + "* **Developing LLM-specific regulations:** Regulations that address the unique characteristics of LLMs, such as their ability to generate content, their susceptibility to manipulation, and their potential impact across various sectors. This could involve establishing clear accountability mechanisms for LLM providers, requiring transparency in LLM training data and processes, and mandating safeguards against harmful content generation.\n", + "* **Strengthening existing regulatory frameworks:** Adapting existing laws, like the EU's AI Act or the UK's AADC, to better address the specific challenges posed by LLMs. This could involve expanding the scope of high-risk AI systems to include certain types of general-purpose LLMs, or introducing LLM-specific guidelines for data protection and age-appropriate design.\n", + "* **Fostering international collaboration:** Given the global nature of LLM development and deployment, international collaboration is essential to ensure consistent and effective regulatory approaches. This could involve sharing best practices, developing common standards, and coordinating enforcement efforts.\n", + "* **Prioritizing ethical considerations in LLM development:** Encouraging LLM developers to adopt ethical principles, such as fairness, transparency, and accountability, from the outset. This can be facilitated through the development of ethical guidelines, the establishment of review boards, and the integration of ethics into AI curricula.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Approaches\n", + "\n", + "Several approaches and techniques are being developed to help effectively implement AI/LLM Safety alignment.\n", + "\n", + "### Red Teaming\n", + "\n", + "Red teaming is a critical security practice adapted from cybersecurity for evaluating Large Language Models (LLMs). Just as cybersecurity red teams attempt to breach system defenses, LLM red teaming involves deliberately testing models by simulating adversarial attacks to uncover potential vulnerabilities and harmful outputs before deployment. We can outline LLMs Red teaming around three key aspects:\n", + "1. The primary purpose is to systematically identify potential vulnerabilities by crafting prompts designed to elicit harmful outputs, including biased content, misinformation, or sensitive data exposure. Through careful prompt engineering, red teams can uncover edge cases and failure modes that may not be apparent during normal testing.\n", + "2. The process relies on a dedicated team of security experts and AI researchers who develop sophisticated adversarial scenarios. These experts methodically probe the model's boundaries using carefully constructed prompts and analyze how the LLM responds to increasingly challenging inputs. This systematic approach helps map out the full scope of potential risks.\n", + "3. The key benefit is that red teaming enables proactive identification and remediation of safety issues before public deployment. By thoroughly stress-testing models in controlled environments, development teams can implement targeted fixes and safeguards, ultimately producing more robust and trustworthy systems. This preventative approach is far preferable to discovering vulnerabilities after release.\n", + "\n", + "A particularly powerful approach involves using one language model (the \"red LM\") to systematically probe and test another target model {cite}`perez2022redteaminglanguagemodels`. The red LM generates diverse test cases specifically crafted to elicit problematic behaviors, while a classifier evaluates the target model's responses for specific categories of harm.\n", + "\n", + "This LLM-based red teaming process consists of three main components:\n", + "\n", + "1. **Systematic Test Generation**: The red LM creates a wide array of test cases using multiple techniques:\n", + " - Zero-shot and few-shot generation\n", + " - Supervised learning approaches\n", + " - Reinforcement learning methods\n", + " These varied approaches help ensure comprehensive coverage across different types of potential vulnerabilities.\n", + "\n", + "2. **Automated Harm Detection**: Specialized classifiers, trained on relevant datasets (e.g., collections of offensive content), automatically analyze the target model's responses to identify harmful outputs.\n", + "\n", + "3. **Rigorous Analysis**: The test results undergo detailed examination to:\n", + " - Map the model's failure modes\n", + " - Identify patterns in problematic responses\n", + " - Develop targeted mitigation strategies\n", + "\n", + "In this research {cite}`perez2022redteaminglanguagemodels`, a 280B parameter \"red-LM\" uncovered numerous concerning behaviors:\n", + "\n", + "- Generation of offensive content including discriminatory statements and explicit material\n", + "- Unauthorized disclosure of training data including personal information\n", + "- Systematic bias in how the model discussed certain demographic groups\n", + "- Problematic conversation patterns where offensive responses triggered escalating harmful exchanges\n", + "\n", + "While LLM-based red teaming offers significant advantages over manual testing in terms of scale and systematic coverage, it also has important limitations. The red LM itself may have biases that affect test case generation, and results require careful interpretation within broader context. Further, Red teaming should be viewed as one component of a comprehensive safety framework rather than a complete solution.\n", + "\n", + "\n", + "### Constitutional AI\n", + "\n", + "\n", + "Anthropic has developed Constitutional AI (CAI) {cite}`askell2023constitutionalai` as a novel approach to enhance the safety of large language models (LLMs). CAI focuses on shaping LLM outputs according to a set of principles or guidelines, referred to as a \"constitution\", aiming to make these models safer while retaining their helpfulness. \n", + "\n", + "Here's how Anthropic utilises CAI to promote LLM safety:\n", + "\n", + "* **Minimising Harm Through Self-Critique:** Instead of relying solely on human feedback for training, Anthropic leverages the LLM's own capabilities to critique and revise its outputs based on the principles enshrined in its constitution. This approach is termed \"Reinforcement Learning from AI Feedback (RLAIF)\". \n", + "* **Balancing Helpfulness and Harmlessness:** Traditional RLHF methods often face a trade-off between creating harmless models and maintaining their usefulness. Anthropic's research suggests that CAI can mitigate this tension by reducing evasive responses. CAI models are less likely to resort to unhelpful \"I can't answer that\" responses, instead engaging with user requests in a safe and informative manner. \n", + "* **Enhancing Transparency and Scalability:** Anthropic highlights that encoding safety principles into a \"constitution\" increases transparency in the model's decision-making process, allowing users and regulators to better understand how the LLM operates. Additionally, CAI proves to be more scalable and efficient compared to RLHF, requiring fewer human feedback labels and reducing the exposure of human reviewers to potentially harmful content.\n", + "\n", + "Anthropic's research indicates that CAI leads to LLMs that are both more harmless and helpful. These models are less evasive, engage with user requests, and are more likely to explain their reasoning when refusing unsafe or unethical requests.\n", + "\n", + "The key insight as proposed by Anthropic is that Constitutional RL manages to break the traditional trade-off between helpfulness and harmlessness. While standard RLHF models tend to become less helpful as they become more harmless (often by becoming more evasive), Constitutional RL achieves high scores in both dimensions simultaneously as demonstrated in {numref}`anthropic-cai-tradeoff`.\n", + "\n", + "```{figure} ../_static/safety/cai.png\n", + "---\n", + "name: anthropic-cai-tradeoff\n", + "alt: Anthropic's Constitutional AI (CAI) achieves high scores in both helpfulness and harmlessness.\n", + "width: 70%\n", + "align: center\n", + "---\n", + "Anthropic's Constitutional AI (CAI) achieves high scores in both helpfulness and harmlessness {cite}`askell2023constitutionalai`.\n", + "```\n", + "\n", + "Anthropic believes that CAI is a promising avenue for building safer and more trustworthy AI systems, moving towards a future where AI aligns more closely with human values and societal needs. \n", + "\n", + "\n", + "### Explainable AI (XAI)\n", + "\n", + "XAI techniques aim to make the decision-making processes of LLMs more transparent and understandable. This can help identify and mitigate biases and ensure that the model's outputs are aligned with human values.\n", + "\n", + "XAI can contribute to LLM safety in multiple ways, including {cite}`cambria2024xaimeetsllmssurvey`:\n", + "\n", + "* **Identifying and Mitigating Bias:** LLMs can inherit biases present in their vast training data, leading to unfair or discriminatory outputs. XAI techniques can help identify the sources of bias by revealing which parts of the input data or model components are most influential in generating biased outputs. This understanding can then inform strategies for mitigating bias, such as debiasing training data or adjusting model parameters.\n", + "* **Detecting and Addressing Hallucinations:** LLMs can generate outputs that sound plausible but are factually incorrect or nonsensical, a phenomenon known as \"hallucination.\" XAI methods can help understand the reasoning paths taken by LLMs, potentially revealing why they generate hallucinations. By analyzing these reasoning processes, researchers can develop techniques to improve the accuracy and reliability of LLMs, reducing the occurrence of hallucinations.\n", + "* **Understanding and Preventing Misuse:** LLMs can be misused for malicious purposes, such as generating harmful content, spreading misinformation, or crafting sophisticated phishing attacks. XAI techniques can provide insights into how LLMs might be vulnerable to misuse by revealing the types of inputs that trigger undesirable outputs. This understanding can then inform the development of robust safeguards and mitigation strategies to prevent or minimize the potential for misuse.\n", + "* **Facilitating Human Oversight and Control:** XAI aims to make the decision-making of LLMs more interpretable to human operators, enabling better oversight and control. This transparency allows humans to monitor the outputs of LLMs, detect potential issues early on, and intervene when necessary to prevent harmful consequences. XAI tools can also be used to explain the reasoning behind specific LLM decisions, helping users understand the model's limitations and make more informed decisions about its use.\n", + "\n", + "### Reinforcement Learning from Human Feedback (RLHF)\n", + "\n", + "RLHF {cite}`bai2022traininghelpfulharmlessassistant` involves training LLMs to generate outputs that are consistent with human preferences and values. This is achieved by providing feedback on the model's outputs and rewarding it for generating desirable responses. More generally, alignment techniques can be used to fine-tune LLMs to produce outputs that are consistent with human preferences and values. \n", + "\n", + "Supervised Fine-Tuning (SFT) techniques such as LoRA {cite}`hu2021loralowrankadaptationlarge` and QLoRA {cite}`dettmers2023qloraefficientfinetuningquantized` can be used to fine-tune LLMs. More recently, techniques such as Direct Preference Optimization (DPO) {cite}`rafailov2024directpreferenceoptimizationlanguage` have been developed to further align LLMs with human preferences.\n", + "\n", + "This will be the focus of the next Chapter where we will explore the process of aligning language models with human preferences." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Technical Implementation Components\n", + "\n", + "### Datasets\n", + "\n", + "\n", + "- SALADBench\n", + "- https://huggingface.co/datasets/Anthropic/hh-rlhf\n", + "- ABC\n", + "\n", + "- use of synthetic datasets\n", + "\n", + "\n", + "### Tools\n", + "\n", + "Filtering:\n", + "- Webpurify\n", + "- LLM-Guard\n", + "- AWS Comprehend\n", + "\n", + "LM-Based:\n", + "\n", + "- OpenAI Moderation API\n", + "- IBM Granite Guardian: https://github.com/ibm-granite/granite-guardian\n", + "\n", + "- Llama-Guard\n", + "- NeMo Guardrails\n", + "- Mistral moderation: https://github.com/mistralai/cookbook/blob/main/mistral/moderation/system-level-guardrails.ipynb\n", + "\n", + "\n", + "#### Filter-based\n", + "\n", + "#### LLM-based\n", + "\n", + "\n", + "\n", + "\n", + "### Benchmarks\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Case Study: Making Mistral 7B Harmless" + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/tamingllms/references.bib b/tamingllms/references.bib index 5e5f266..3821d79 100644 --- a/tamingllms/references.bib +++ b/tamingllms/references.bib @@ -667,6 +667,24 @@ @misc{rafailov2024directpreferenceoptimizationlanguage url={https://arxiv.org/abs/2305.18290}, } +@techreport{ukgov2024airegulation24, + title={AI Regulation: A Pro-Innovation Approach}, + author={{UK Government}}, + year={2024}, + institution={Department for Science, Innovation and Technology}, + type={White Paper}, + url={https://www.gov.uk/government/publications/ai-regulation-a-pro-innovation-approach/white-paper}, +} + +@misc{meta2024llamaguard, + title={LlamaGuard: LLM-based Input-Output Safeguard for Human-AI Conversations}, + author={Meta AI}, + year={2024}, + howpublished={Meta AI Research Publications}, + url={https://ai.meta.com/research/publications/llama-guard-llm-based-input-output-safeguard-for-human-ai-conversations/}, +} + + @misc{touvron2023llama2openfoundation, title={Llama 2: Open Foundation and Fine-Tuned Chat Models}, @@ -745,6 +763,17 @@ @misc{neurips2023awards } +@techreport{finra2024llmguidance24, + title={Artificial Intelligence, Including Large Language Models and Generative AI}, + author={{Financial Industry Regulatory Authority}}, + year={2024}, + institution={FINRA}, + type={Regulatory Notice}, + number={24-09}, + url={https://www.finra.org/rules-guidance/notices/24-09}, +} + + @misc{huggingface2024trl, title={TRL}, @@ -882,3 +911,152 @@ @article{siam2024exploitllms number={1}, url={https://www.siam.org/publications/siam-news/articles/how-to-exploit-large-language-models-for-good-or-bad/}, } + + +@misc{sutton2024stealtheditslargelanguage, + title={Stealth edits to large language models}, + author={Oliver J. Sutton and Qinghua Zhou and Wei Wang and Desmond J. Higham and Alexander N. Gorban and Alexander Bastounis and Ivan Y. Tyukin}, + year={2024}, + eprint={2406.12670}, + archivePrefix={arXiv}, + primaryClass={cs.AI}, + url={https://arxiv.org/abs/2406.12670}, +} + +@misc{exabeam2024airegulations, + title={AI Regulations and LLM Regulations: Past, Present, and Future}, + author={Exabeam}, + year={2024}, + howpublished={Exabeam Blog}, + url={https://www.exabeam.com/explainers/ai-cyber-security/ai-regulations-and-llm-regulations-past-present-and-future/}, +} + + +@techreport{ema2024llmguidelines, + title={Guiding principles for the use of large language models in regulatory science and medicines regulatory activities}, + author={{European Medicines Agency}}, + year={2024}, + institution={European Medicines Agency}, + type={Guidance Document}, + url={https://www.ema.europa.eu/en/documents/other/guiding-principles-use-large-language-models-regulatory-science-medicines-regulatory-activities_en.pdf}, +} + + + + +@misc{alaga2024gradingrubricaisafety, + title={A Grading Rubric for AI Safety Frameworks}, + author={Jide Alaga and Jonas Schuett and Markus Anderljung}, + year={2024}, + eprint={2409.08751}, + archivePrefix={arXiv}, + primaryClass={cs.CY}, + url={https://arxiv.org/abs/2409.08751}, +} + +@techreport{unicef2024aiguidance, + title={Policy Guidance on AI for Children}, + author={{UNICEF}}, + year={2024}, + institution={UNICEF Office of Research - Innocenti}, + type={Policy Report}, + url={https://www.unicef.org/innocenti/reports/policy-guidance-ai-children}, +} + + + + + + + + + + +@article{doi:10.1098/rsos.240197, +author = {Wachter, Sandra and Mittelstadt, Brent and Russell, Chris }, +title = {Do large language models have a legal duty to tell the truth?}, +journal = {Royal Society Open Science}, +volume = {11}, +number = {8}, +pages = {240197}, +year = {2024}, +doi = {10.1098/rsos.240197}, + +URL = {https://royalsocietypublishing.org/doi/abs/10.1098/rsos.240197}, +eprint = {https://royalsocietypublishing.org/doi/pdf/10.1098/rsos.240197} +} + +@misc{china2023generativeai, + title={China: Generative AI Measures Finalized}, + author={{Library of Congress}}, + year={2023}, + institution={Law Library of Congress}, + type={Global Legal Monitor}, + month={July}, + url={https://www.loc.gov/item/global-legal-monitor/2023-07-18/china-generative-ai-measures-finalized/}, +} + +@techreport{nist2024riskframework, + title={AI Risk Management Framework}, + author={{National Institute of Standards and Technology}}, + year={2024}, + institution={National Institute of Standards and Technology}, + type={Technical Report}, + url={https://www.nist.gov/itl/ai-risk-management-framework}, +} + + +@techreport{openai2024preparedness, + title={OpenAI Preparedness Framework}, + author={{OpenAI}}, + year={2024}, + institution={OpenAI}, + type={Technical Report}, + url={https://cdn.openai.com/openai-preparedness-framework-beta.pdf}, +} + +@techreport{anthropic2024scaling, + title={Anthropic's Responsible Scaling Policy}, + author={{Anthropic}}, + year={2024}, + institution={Anthropic}, + type={Technical Report}, + url={https://www-cdn.anthropic.com/1adf000c8f675958c2ee23805d91aaade1cd4613/responsible-scaling-policy.pdf}, +} + +@techreport{deepmind2024frontier, + title={The Frontier Safety Framework}, + author={{DeepMind}}, + year={2024}, + institution={DeepMind}, + type={Technical Report}, + url={https://storage.googleapis.com/deepmind-media/DeepMind.com/Blog/introducing-the-frontier-safety-framework/fsf-technical-report.pdf}, +} + +@misc{perez2022redteaminglanguagemodels, + title={Red Teaming Language Models with Language Models}, + author={Ethan Perez and Saffron Huang and Francis Song and Trevor Cai and Roman Ring and John Aslanides and Amelia Glaese and Nat McAleese and Geoffrey Irving}, + year={2022}, + eprint={2202.03286}, + archivePrefix={arXiv}, + primaryClass={cs.CL}, + url={https://arxiv.org/abs/2202.03286}, +} + +@misc{cambria2024xaimeetsllmssurvey, + title={XAI meets LLMs: A Survey of the Relation between Explainable AI and Large Language Models}, + author={Erik Cambria and Lorenzo Malandri and Fabio Mercorio and Navid Nobani and Andrea Seveso}, + year={2024}, + eprint={2407.15248}, + archivePrefix={arXiv}, + primaryClass={cs.CL}, + url={https://arxiv.org/abs/2407.15248}, +} + +@misc{askell2023constitutionalai, + title={Constitutional AI: Harmlessness from AI Feedback}, + author={Amanda Askell and Yuntao Bai and Anna Chen and Deep Ganguli and Danny Hernandez and Jared Kaplan and Jackson Kernion and Ben Mann and Catherine Olsson and Paul Christiano}, + year={2023}, + institution={Anthropic}, + url={https://www.anthropic.com/research/constitutional-ai-harmlessness-from-ai-feedback}, +}
                Table 3.1 Structured Output Frameworks Comparison