<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
  <channel>
    <title>Writing on Tim O&#39;Brien</title>
    <link>https://b5874bfb.tim-o-dot-com.pages.dev/posts/</link>
    <description>Recent content in Writing on Tim O&#39;Brien</description>
    <generator>Hugo</generator>
    <language>en</language>
    <lastBuildDate>Fri, 17 Apr 2026 00:00:00 +0000</lastBuildDate>
    <atom:link href="https://b5874bfb.tim-o-dot-com.pages.dev/posts/index.xml" rel="self" type="application/rss+xml" />
    <item>
      <title>Prompt engineering is totally 2025</title>
      <link>https://b5874bfb.tim-o-dot-com.pages.dev/posts/context-engineering/</link>
      <pubDate>Fri, 17 Apr 2026 00:00:00 +0000</pubDate>
      <guid>https://b5874bfb.tim-o-dot-com.pages.dev/posts/context-engineering/</guid>
      <description>&lt;div class=&#34;callout callout-tldr&#34;&gt;&#xA;  &lt;span class=&#34;callout-label&#34;&gt;TL;DR&lt;/span&gt;&lt;p&gt;LLMs are prediction engines that generate responses from the full context they&amp;rsquo;re given. In older models (from GPT-1 through the 2025 generation), you could improve performance with prompting tricks that pushed them toward usable output (e.g., personas, examples, &amp;ldquo;don&amp;rsquo;t do X&amp;rdquo;). Frontier models are capable enough that those same tricks can push them too far, activating patterns you don&amp;rsquo;t want.&lt;/p&gt;&#xA;&lt;p&gt;Lean, intentionally distilled context now beats elaborate prompts, especially when prompts include extraneous details that can trigger unwanted behavior.&lt;/p&gt;</description>
    </item>
  </channel>
</rss>
