<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" encoding="UTF-8" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:sy="http://purl.org/rss/1.0/modules/syndication/" xmlns:admin="http://webns.net/mvcb/" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:googleplay="http://www.google.com/schemas/play-podcasts/1.0" xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" xmlns:fireside="http://fireside.fm/modules/rss/fireside">
  <channel>
    <fireside:hostname>web02.fireside.fm</fireside:hostname>
    <fireside:genDate>Thu, 23 Apr 2026 03:42:25 -0500</fireside:genDate>
    <generator>Fireside (https://fireside.fm)</generator>
    <title>Coder Radio - Episodes Tagged with “Rag”</title>
    <link>https://coder.show/tags/rag</link>
    <pubDate>Wed, 22 Apr 2026 17:00:00 -0400</pubDate>
    <description>A weekly talk show taking a pragmatic look at the art and business of Software Development and the world of technology.
</description>
    <language>en-us</language>
    <itunes:type>episodic</itunes:type>
    <itunes:subtitle>A weekly talk show</itunes:subtitle>
    <itunes:author>The Mad Botter</itunes:author>
    <itunes:summary>A weekly talk show taking a pragmatic look at the art and business of Software Development and the world of technology.
</itunes:summary>
    <itunes:image href="https://media24.fireside.fm/file/fireside-images-2024/podcasts/images/b/b44de5fa-47c1-4e94-bf9e-c72f8d1c8f5d/cover.jpg?v=7"/>
    <itunes:explicit>no</itunes:explicit>
    <itunes:owner>
      <itunes:name>The Mad Botter</itunes:name>
      <itunes:email>michael@themadbotter.com</itunes:email>
    </itunes:owner>
<itunes:category text="Technology"/>
<itunes:category text="Education">
  <itunes:category text="How To"/>
</itunes:category>
<itunes:category text="Business"/>
<item>
  <title>646: Shawn Hymel</title>
  <link>https://coder.show/646</link>
  <guid isPermaLink="false">9da9456f-1681-42b9-9f42-acfc454ffb1d</guid>
  <pubDate>Wed, 22 Apr 2026 17:00:00 -0400</pubDate>
  <author>The Mad Botter</author>
  <enclosure url="https://aphid.fireside.fm/d/1437767933/b44de5fa-47c1-4e94-bf9e-c72f8d1c8f5d/9da9456f-1681-42b9-9f42-acfc454ffb1d.mp3" length="35668882" type="audio/mpeg"/>
  <itunes:episodeType>full</itunes:episodeType>
  <itunes:author>The Mad Botter</itunes:author>
  <itunes:subtitle>Mike sits down with IoT and Embedded Linux expert, educator and YouTube sensation Shawn Hymel to discuss the current state of IoT and embedded Linux. </itunes:subtitle>
  <itunes:duration>24:44</itunes:duration>
  <itunes:explicit>no</itunes:explicit>
  <itunes:image href="https://media24.fireside.fm/file/fireside-images-2024/podcasts/images/b/b44de5fa-47c1-4e94-bf9e-c72f8d1c8f5d/cover.jpg?v=7"/>
  <description>Shawn on LinkedIn (https://www.linkedin.com/in/shawnhymel/)
Shawn's YouTube Channel (https://www.youtube.com/@ShawnHymel)
Coder Radio Discord (https://discord.gg/k8e7gKUpEp)
The Mad Botter Data Platform (https://themadbotter.com/botterkit)
Mike's Blog (https://dominickm.com) 
</description>
  <itunes:keywords>ai, programming, nosql, linux, bigdata, bi, rust, dotnet, node, ruby, tabnine, coding, vibe coding, co-pilot, iot, embedded</itunes:keywords>
  <content:encoded>
    <![CDATA[<ul>
<li><a href="https://www.linkedin.com/in/shawnhymel/" rel="nofollow">Shawn on LinkedIn</a></li>
<li><a href="https://www.youtube.com/@ShawnHymel" rel="nofollow">Shawn&#39;s YouTube Channel</a></li>
</ul>

<p><a href="https://discord.gg/k8e7gKUpEp" rel="nofollow">Coder Radio Discord</a><br>
<a href="https://themadbotter.com/botterkit" rel="nofollow">The Mad Botter Data Platform</a><br>
<a href="https://dominickm.com" rel="nofollow">Mike&#39;s Blog</a></p>]]>
  </content:encoded>
  <itunes:summary>
    <![CDATA[<ul>
<li><a href="https://www.linkedin.com/in/shawnhymel/" rel="nofollow">Shawn on LinkedIn</a></li>
<li><a href="https://www.youtube.com/@ShawnHymel" rel="nofollow">Shawn&#39;s YouTube Channel</a></li>
</ul>

<p><a href="https://discord.gg/k8e7gKUpEp" rel="nofollow">Coder Radio Discord</a><br>
<a href="https://themadbotter.com/botterkit" rel="nofollow">The Mad Botter Data Platform</a><br>
<a href="https://dominickm.com" rel="nofollow">Mike&#39;s Blog</a></p>]]>
  </itunes:summary>
</item>
<item>
  <title>645: Warp's Holmes &amp; Lloyd</title>
  <link>https://coder.show/645</link>
  <guid isPermaLink="false">78060061-1ba3-43a5-b70b-4da3abc9d24e</guid>
  <pubDate>Wed, 08 Apr 2026 05:00:00 -0400</pubDate>
  <author>The Mad Botter</author>
  <enclosure url="https://aphid.fireside.fm/d/1437767933/b44de5fa-47c1-4e94-bf9e-c72f8d1c8f5d/78060061-1ba3-43a5-b70b-4da3abc9d24e.mp3" length="39136046" type="audio/mpeg"/>
  <itunes:episodeType>full</itunes:episodeType>
  <itunes:author>The Mad Botter</itunes:author>
  <itunes:subtitle>A rare trio episode with two of the luminaries behind Warp and good old gungan Mike where they discuss all things agentic including Warp's very own Oz. </itunes:subtitle>
  <itunes:duration>27:06</itunes:duration>
  <itunes:explicit>no</itunes:explicit>
  <itunes:image href="https://media24.fireside.fm/file/fireside-images-2024/podcasts/images/b/b44de5fa-47c1-4e94-bf9e-c72f8d1c8f5d/cover.jpg?v=7"/>
  <description>Warp's Oz (https://www.warp.dev/oz)
Ben on LinkedIn (https://www.linkedin.com/in/bholmesdev/)
Zach on LinkedIn (https://www.linkedin.com/in/zachlloyd/)
Coder Radio Discord (https://discord.gg/k8e7gKUpEp)
The Mad Botter Data Platform (https://themadbotter.com/botterkit)
Mike's Blog (https://dominickm.com) 
</description>
  <itunes:keywords>ai, programming, nosql, linux, bigdata, bi, rust, dotnet, node, ruby, tabnine, coding, vibe coding, co-pilot, warp, agentic, agents, ai</itunes:keywords>
  <content:encoded>
    <![CDATA[<p><a href="https://www.warp.dev/oz" rel="nofollow">Warp&#39;s Oz</a><br>
<a href="https://www.linkedin.com/in/bholmesdev/" rel="nofollow">Ben on LinkedIn</a><br>
<a href="https://www.linkedin.com/in/zachlloyd/" rel="nofollow">Zach on LinkedIn</a></p>

<p><a href="https://discord.gg/k8e7gKUpEp" rel="nofollow">Coder Radio Discord</a><br>
<a href="https://themadbotter.com/botterkit" rel="nofollow">The Mad Botter Data Platform</a><br>
<a href="https://dominickm.com" rel="nofollow">Mike&#39;s Blog</a></p>]]>
  </content:encoded>
  <itunes:summary>
    <![CDATA[<p><a href="https://www.warp.dev/oz" rel="nofollow">Warp&#39;s Oz</a><br>
<a href="https://www.linkedin.com/in/bholmesdev/" rel="nofollow">Ben on LinkedIn</a><br>
<a href="https://www.linkedin.com/in/zachlloyd/" rel="nofollow">Zach on LinkedIn</a></p>

<p><a href="https://discord.gg/k8e7gKUpEp" rel="nofollow">Coder Radio Discord</a><br>
<a href="https://themadbotter.com/botterkit" rel="nofollow">The Mad Botter Data Platform</a><br>
<a href="https://dominickm.com" rel="nofollow">Mike&#39;s Blog</a></p>]]>
  </itunes:summary>
</item>
<item>
  <title>636: Red Hat's James Huang</title>
  <link>https://coder.show/636</link>
  <guid isPermaLink="false">21937e2f-637d-493a-88a8-90e81c016e7c</guid>
  <pubDate>Fri, 19 Dec 2025 04:00:00 -0500</pubDate>
  <author>The Mad Botter</author>
  <enclosure url="https://aphid.fireside.fm/d/1437767933/b44de5fa-47c1-4e94-bf9e-c72f8d1c8f5d/21937e2f-637d-493a-88a8-90e81c016e7c.mp3" length="30174340" type="audio/mpeg"/>
  <itunes:episodeType>full</itunes:episodeType>
  <itunes:author>The Mad Botter</itunes:author>
  <itunes:subtitle>
In this episode of the Coder Radio (Coder.show) network, Michael Dominick sits down with James Huang, Senior Product Manager of AI and High Performance Computing at Red Hat, to discuss the intersection of enterprise-grade Linux and the rapidly evolving world of artificial intelligence.

</itunes:subtitle>
  <itunes:duration>20:53</itunes:duration>
  <itunes:explicit>no</itunes:explicit>
  <itunes:image href="https://media24.fireside.fm/file/fireside-images-2024/podcasts/images/b/b44de5fa-47c1-4e94-bf9e-c72f8d1c8f5d/episodes/2/21937e2f-637d-493a-88a8-90e81c016e7c/cover.jpg?v=1"/>
  <description>Links
James on LinkedIn (https://www.linkedin.com/in/jahuang/)
Mike on LinkedIn (https://www.linkedin.com/in/dominucco/)
Mike's Blog (https://dominickm.com)
Show on Discord (https://discord.com/invite/k8e7gKUpEp)
Alice Promo (https://go.alice.dev/data-migration-offer-hands-on)
AI on Red Hat Enterprise Linux (RHEL)
Trust and Stability: RHEL provides the mission-critical foundation needed for workloads where security and reliability cannot be compromised.
Predictive vs. Generative: Acknowledging the hype of GenAI while maintaining support for traditional machine learning algorithms.
Determinism: The challenge of bringing consistency and security to emerging AI technologies in production environments.
Rama-Llama &amp; Containerization
Developer Simplicity: Rama-Llama helps developers run local LLMs easily without being "locked in" to specific engines; it supports Podman, Docker, and various inference engines like Llama.cpp and Whisper.cpp.
Production Path: The tool is designed to "fade away" after helping package the model and stack into a container that can be deployed directly to Kubernetes.
Behind the Firewall: Addressing the needs of industries (like aircraft maintenance) that require AI to stay strictly on-premises.
Enterprise AI Infrastructure
Red Hat AI: A commercial product offering tools for model customization, including pre-training, fine-tuning, and RAG (Retrieval-Augmented Generation).
Inference Engines: James highlights the difference between Llama.cpp (for smaller/edge hardware) and vLLM, which has become the enterprise standard for multi-GPU data center inferencing. 
</description>
  <itunes:keywords>Artificial Intelligence &amp; Machine Learning, Open Source Software, AI, FOSS, Cloud Native &amp; Containers</itunes:keywords>
  <content:encoded>
    <![CDATA[<p>Links<br>
<a href="https://www.linkedin.com/in/jahuang/" rel="nofollow">James on LinkedIn</a><br>
<a href="https://www.linkedin.com/in/dominucco/" rel="nofollow">Mike on LinkedIn</a><br>
<a href="https://dominickm.com" rel="nofollow">Mike&#39;s Blog</a><br>
<a href="https://discord.com/invite/k8e7gKUpEp" rel="nofollow">Show on Discord</a></p>

<p><a href="https://go.alice.dev/data-migration-offer-hands-on" rel="nofollow">Alice Promo</a></p>

<ol>
<li>AI on Red Hat Enterprise Linux (RHEL)</li>
</ol>

<p>Trust and Stability: RHEL provides the mission-critical foundation needed for workloads where security and reliability cannot be compromised.</p>

<p>Predictive vs. Generative: Acknowledging the hype of GenAI while maintaining support for traditional machine learning algorithms.</p>

<p>Determinism: The challenge of bringing consistency and security to emerging AI technologies in production environments.</p>

<ol>
<li>Rama-Llama &amp; Containerization</li>
</ol>

<p>Developer Simplicity: Rama-Llama helps developers run local LLMs easily without being &quot;locked in&quot; to specific engines; it supports Podman, Docker, and various inference engines like Llama.cpp and Whisper.cpp.</p>

<p>Production Path: The tool is designed to &quot;fade away&quot; after helping package the model and stack into a container that can be deployed directly to Kubernetes.</p>

<p>Behind the Firewall: Addressing the needs of industries (like aircraft maintenance) that require AI to stay strictly on-premises.</p>

<ol>
<li>Enterprise AI Infrastructure</li>
</ol>

<p>Red Hat AI: A commercial product offering tools for model customization, including pre-training, fine-tuning, and RAG (Retrieval-Augmented Generation).</p>

<p>Inference Engines: James highlights the difference between Llama.cpp (for smaller/edge hardware) and vLLM, which has become the enterprise standard for multi-GPU data center inferencing.</p>]]>
  </content:encoded>
  <itunes:summary>
    <![CDATA[<p>Links<br>
<a href="https://www.linkedin.com/in/jahuang/" rel="nofollow">James on LinkedIn</a><br>
<a href="https://www.linkedin.com/in/dominucco/" rel="nofollow">Mike on LinkedIn</a><br>
<a href="https://dominickm.com" rel="nofollow">Mike&#39;s Blog</a><br>
<a href="https://discord.com/invite/k8e7gKUpEp" rel="nofollow">Show on Discord</a></p>

<p><a href="https://go.alice.dev/data-migration-offer-hands-on" rel="nofollow">Alice Promo</a></p>

<ol>
<li>AI on Red Hat Enterprise Linux (RHEL)</li>
</ol>

<p>Trust and Stability: RHEL provides the mission-critical foundation needed for workloads where security and reliability cannot be compromised.</p>

<p>Predictive vs. Generative: Acknowledging the hype of GenAI while maintaining support for traditional machine learning algorithms.</p>

<p>Determinism: The challenge of bringing consistency and security to emerging AI technologies in production environments.</p>

<ol>
<li>Rama-Llama &amp; Containerization</li>
</ol>

<p>Developer Simplicity: Rama-Llama helps developers run local LLMs easily without being &quot;locked in&quot; to specific engines; it supports Podman, Docker, and various inference engines like Llama.cpp and Whisper.cpp.</p>

<p>Production Path: The tool is designed to &quot;fade away&quot; after helping package the model and stack into a container that can be deployed directly to Kubernetes.</p>

<p>Behind the Firewall: Addressing the needs of industries (like aircraft maintenance) that require AI to stay strictly on-premises.</p>

<ol>
<li>Enterprise AI Infrastructure</li>
</ol>

<p>Red Hat AI: A commercial product offering tools for model customization, including pre-training, fine-tuning, and RAG (Retrieval-Augmented Generation).</p>

<p>Inference Engines: James highlights the difference between Llama.cpp (for smaller/edge hardware) and vLLM, which has become the enterprise standard for multi-GPU data center inferencing.</p>]]>
  </itunes:summary>
</item>
  </channel>
</rss>
