<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:image="http://www.google.com/schemas/sitemap-image/1.1" xmlns:xhtml="http://www.w3.org/1999/xhtml" xmlns:video="http://www.google.com/schemas/sitemap-video/1.1">
  <url>
    <loc>https://www.nota.ai/community</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2026-03-31</lastmod>
  </url>
  <url>
    <loc>https://www.nota.ai/community/the-real-reason-turboquant-shook-the-market-ai-optimization-has-gone-mainstream</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2026-03-31</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/c1239f42-73b9-4c1f-89f9-f13d6909ba29/%E1%84%8B%E1%85%B5%E1%84%8C%E1%85%A2%E1%84%92%E1%85%AE%E1%86%AB1-0+%281%29.jpg</image:loc>
      <image:title>Tech Blog - The Real Reason TurboQuant Shook the Market: AI Optimization Has Gone Mainstream - Jaehoon Lee Technical Content Manager, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/bdb3dd4f-d04e-44d6-afce-6aaf5d2e50f8/TurboQuant+Content+Figure_01.png</image:loc>
      <image:title>Tech Blog - The Real Reason TurboQuant Shook the Market: AI Optimization Has Gone Mainstream - 돋보이게 만드세요</image:title>
      <image:caption>Figure 1: Electricity generation in China vs. the U.S. (2008–2024). China's output has roughly tripled while the U.S. has plateaued, underscoring the need for energy-efficiency gains in the AI era. (Source: AI+HW 2035)</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/23126b64-c822-4e6c-a1fc-cc261c704518/TurboQuant+Content+Figure_02.png</image:loc>
      <image:title>Tech Blog - The Real Reason TurboQuant Shook the Market: AI Optimization Has Gone Mainstream - 돋보이게 만드세요</image:title>
      <image:caption>Figure 2: Throughput comparison by precision when serving DeepSeek-R1 on NVIDIA HGX B200. The NVFP4 (4-bit) + MTP configuration achieves approximately 2× the throughput of FP8. (Source: NVIDIA Technical Blog)</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/98588942-16cf-4c73-ab1a-7d2822d9b751/TurboQuant+Content+Figure_03.png</image:loc>
      <image:title>Tech Blog - The Real Reason TurboQuant Shook the Market: AI Optimization Has Gone Mainstream - 돋보이게 만드세요</image:title>
      <image:caption>Figure 3: LongBench accuracy comparison across KV cache quantization methods. TurboQuant maintains the same score (50.06) as the 16-bit Full Cache even at 3.5-bit compression. (Source: Google Research)</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/gtc-2026-recap-the-trillion-dollar-inference-race-begins-how-nota-ai-fills-the-gap</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2026-03-31</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/c1239f42-73b9-4c1f-89f9-f13d6909ba29/%E1%84%8B%E1%85%B5%E1%84%8C%E1%85%A2%E1%84%92%E1%85%AE%E1%86%AB1-0+%281%29.jpg</image:loc>
      <image:title>Tech Blog - [GTC 2026 Recap] The Trillion-Dollar Inference Race Begins: How Nota AI Fills the Gap - Jaehoon Lee Technical Content Manager, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/3059b7ea-a974-444e-90dc-d7f9f9db212c/GTC+2026_01.png</image:loc>
      <image:title>Tech Blog - [GTC 2026 Recap] The Trillion-Dollar Inference Race Begins: How Nota AI Fills the Gap - 돋보이게 만드세요</image:title>
      <image:caption>Figure 1: A slide illustrating the 10,000x increase in inference compute demand over the two years since ChatGPT's launch (Source: GTC 2026 Keynote)</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/0560154c-c3b0-4fe3-a99c-b6dadc40d98a/GTC+2026_02.png</image:loc>
      <image:title>Tech Blog - [GTC 2026 Recap] The Trillion-Dollar Inference Race Begins: How Nota AI Fills the Gap - 돋보이게 만드세요</image:title>
      <image:caption>Figure 2: The NemoClaw architecture, unifying agent creation, deployment, and governance in a single framework (Source: GTC 2026 Keynote)</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/f7806ba3-1f1a-491e-8b57-e5903ff49e11/GTC+2026_03.png</image:loc>
      <image:title>Tech Blog - [GTC 2026 Recap] The Trillion-Dollar Inference Race Begins: How Nota AI Fills the Gap - 돋보이게 만드세요</image:title>
      <image:caption>Figure 3: Robots, autonomous vehicles, and industrial heavy machinery gathered on the keynote stage (Source: GTC 2026 Keynote)</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/1f3d2fd7-ecfa-460c-970d-b723193dc084/GTC+2026_04.png</image:loc>
      <image:title>Tech Blog - [GTC 2026 Recap] The Trillion-Dollar Inference Race Begins: How Nota AI Fills the Gap - 돋보이게 만드세요</image:title>
      <image:caption>Figure 4: The disaggregated architecture with Rubin GPUs handling prefill and Groq 3 LPUs handling decode (Source: GTC 2026 Keynote)</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/genai-everywhere-the-future-of-edge-ai-optimization-with-the-new-netspresso</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2026-03-31</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/2b17235e-03b5-4c3a-b9e5-95f1046a7a09/image+%285%29.png</image:loc>
      <image:title>Tech Blog - GenAI Everywhere: The Future of Edge AI Optimization with the New NetsPresso® - NP Product Team, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/ae4b298e-00a2-4ac9-a22e-31bfa7bc0779/7C203010.jpg</image:loc>
      <image:title>Tech Blog - GenAI Everywhere: The Future of Edge AI Optimization with the New NetsPresso® - 돋보이게 만드세요</image:title>
      <image:caption>Figure 1. Device Farm at Nota AI Seoul office, featuring devices where AI models have been successfully ported by Nota AI.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/86a51f71-7c84-4bd8-9fe8-cf2bb0f548ce/banner_eng.png</image:loc>
      <image:title>Tech Blog - GenAI Everywhere: The Future of Edge AI Optimization with the New NetsPresso® - 돋보이게 만드세요</image:title>
      <image:caption>무엇이든, 온라인에서 스토리를 전달하는 방식이 큰 차이를 만들어낼 수 있습니다.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/8fd17cf2-beb2-41a2-81f6-d2b72412759d/Frame+1597885558_5.png</image:loc>
      <image:title>Tech Blog - GenAI Everywhere: The Future of Edge AI Optimization with the New NetsPresso® - 돋보이게 만드세요</image:title>
      <image:caption>무엇이든, 온라인에서 스토리를 전달하는 방식이 큰 차이를 만들어낼 수 있습니다.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/notamoequantization-an-moe-specific-quantization-method-for-solar-open-100b</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2026-03-26</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/156179b6-aa91-4f31-8db9-9dca1eae4a13/%E3%85%87%E3%84%B9%E3%85%87%E3%85%87.png</image:loc>
      <image:title>Tech Blog - NotaMoEQuantization: An MoE-Specific Quantization Method for Solar-Open-100B - Hancheol Park, Ph. D. AI Research Engineer, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/9f809ab9-49d5-4779-88f4-83a4348faa70/142.+Tairen+Piao.jpg</image:loc>
      <image:title>Tech Blog - NotaMoEQuantization: An MoE-Specific Quantization Method for Solar-Open-100B - Tairen Piao AI Research Engineer, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/8033a3ad-73e8-405d-a99f-2d422ff6793d/37.+%E1%84%80%E1%85%B5%E1%86%B7%E1%84%90%E1%85%A2%E1%84%92%E1%85%A9.jpg</image:loc>
      <image:title>Tech Blog - NotaMoEQuantization: An MoE-Specific Quantization Method for Solar-Open-100B - Tae-Ho Kim CTO &amp; Co-Founder, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/183da0e7-52e1-4bd3-b95b-520f658a4421/2.jpg</image:loc>
      <image:title>Tech Blog - NotaMoEQuantization: An MoE-Specific Quantization Method for Solar-Open-100B - ✔️ Resource : The official quantized model of Solar-Open-100B, which passed the first round of South Korea’s Sovereign AI Foundation Model project: https://huggingface.co/nota-ai/Solar-Open-100B-NotaMoEQuant-Int4</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/c4c35153-64fc-4807-a708-1d4ef3d5cec8/%E1%84%8B%E1%85%B5%E1%84%86%E1%85%B5%E1%84%8C%E1%85%B5+%E1%84%8C%E1%85%A1%E1%84%85%E1%85%AD.png</image:loc>
      <image:title>Tech Blog - NotaMoEQuantization: An MoE-Specific Quantization Method for Solar-Open-100B - 돋보이게 만드세요</image:title>
      <image:caption>무엇이든, 온라인에서 스토리를 전달하는 방식이 큰 차이를 만들어낼 수 있습니다.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/8e3970e0-610f-4bae-ab9a-a9180dadaa00/%E1%84%89%E1%85%B3%E1%84%8F%E1%85%B3%E1%84%85%E1%85%B5%E1%86%AB%E1%84%89%E1%85%A3%E1%86%BA+2026-03-13+%E1%84%8B%E1%85%A9%E1%84%92%E1%85%AE+8.45.34.png</image:loc>
      <image:title>Tech Blog - NotaMoEQuantization: An MoE-Specific Quantization Method for Solar-Open-100B - 돋보이게 만드세요</image:title>
      <image:caption>무엇이든, 온라인에서 스토리를 전달하는 방식이 큰 차이를 만들어낼 수 있습니다.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/08ee8eec-1e74-49bc-9cb7-b54314b9d6d7/%E1%84%89%E1%85%B3%E1%84%8F%E1%85%B3%E1%84%85%E1%85%B5%E1%86%AB%E1%84%89%E1%85%A3%E1%86%BA+2026-03-13+%E1%84%8B%E1%85%A9%E1%84%92%E1%85%AE+8.45.24.png</image:loc>
      <image:title>Tech Blog - NotaMoEQuantization: An MoE-Specific Quantization Method for Solar-Open-100B - 돋보이게 만드세요</image:title>
      <image:caption>무엇이든, 온라인에서 스토리를 전달하는 방식이 큰 차이를 만들어낼 수 있습니다.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/26c46733-cdc7-42c7-b626-8bc27ac87f84/%E1%84%89%E1%85%B3%E1%84%8F%E1%85%B3%E1%84%85%E1%85%B5%E1%86%AB%E1%84%89%E1%85%A3%E1%86%BA+2026-03-13+%E1%84%8B%E1%85%A9%E1%84%92%E1%85%AE+8.42.14.png</image:loc>
      <image:title>Tech Blog - NotaMoEQuantization: An MoE-Specific Quantization Method for Solar-Open-100B - 돋보이게 만드세요</image:title>
      <image:caption>무엇이든, 온라인에서 스토리를 전달하는 방식이 큰 차이를 만들어낼 수 있습니다.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/fd3531b4-3bb4-43dd-baf7-14dc82d0eacd/19.png</image:loc>
      <image:title>Tech Blog - NotaMoEQuantization: An MoE-Specific Quantization Method for Solar-Open-100B - 돋보이게 만드세요</image:title>
      <image:caption>무엇이든, 온라인에서 스토리를 전달하는 방식이 큰 차이를 만들어낼 수 있습니다.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/8d18f17d-09c9-4f96-b9ce-a3bde83cd3be/%E1%84%89%E1%85%B3%E1%84%8F%E1%85%B3%E1%84%85%E1%85%B5%E1%86%AB%E1%84%89%E1%85%A3%E1%86%BA+2026-03-13+%E1%84%8B%E1%85%A9%E1%84%92%E1%85%AE+8.42.30.png</image:loc>
      <image:title>Tech Blog - NotaMoEQuantization: An MoE-Specific Quantization Method for Solar-Open-100B - 돋보이게 만드세요</image:title>
      <image:caption>무엇이든, 온라인에서 스토리를 전달하는 방식이 큰 차이를 만들어낼 수 있습니다.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/ergo-efficient-high-resolution-visual-understanding-for-vision-language-models</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2026-03-11</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/8876bcb3-57ae-442e-978c-27a2d26f4398/Group+40153.jpg</image:loc>
      <image:title>Tech Blog - ERGO: Efficient High-Resolution Visual Understanding for Vision-Language Models - Jewon Lee | Wooksu Shin | Seungmin Yang | Ki-Ung Song | Donguk Lim | Jaeyeon Kim | Tae-Ho Kim | Bo-Kyeong Kim EdgeFM Team, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/eb27c867-8ed3-41b4-ac5f-3f8ec0b36e82/comparison_with_prior.png</image:loc>
      <image:title>Tech Blog - ERGO: Efficient High-Resolution Visual Understanding for Vision-Language Models - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/5521ce57-e96c-436a-a6a6-e018a1024c7a/training_pipeline.png</image:loc>
      <image:title>Tech Blog - ERGO: Efficient High-Resolution Visual Understanding for Vision-Language Models - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/ecff5c35-c823-4fa8-94a5-ca19406a0d07/%E1%84%89%E1%85%B3%E1%84%8F%E1%85%B3%E1%84%85%E1%85%B5%E1%86%AB%E1%84%89%E1%85%A3%E1%86%BA+2026-02-26+%E1%84%8B%E1%85%A9%E1%84%92%E1%85%AE+1.19.49.png</image:loc>
      <image:title>Tech Blog - ERGO: Efficient High-Resolution Visual Understanding for Vision-Language Models - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/0c32dd11-c429-4fdf-b05d-d076eaed42a3/%E1%84%89%E1%85%B3%E1%84%8F%E1%85%B3%E1%84%85%E1%85%B5%E1%86%AB%E1%84%89%E1%85%A3%E1%86%BA+2026-02-26+%E1%84%8B%E1%85%A9%E1%84%92%E1%85%AE+1.58.35.png</image:loc>
      <image:title>Tech Blog - ERGO: Efficient High-Resolution Visual Understanding for Vision-Language Models</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/0ae4cba7-fd34-4092-81dd-8d27ed874925/image.png</image:loc>
      <image:title>Tech Blog - ERGO: Efficient High-Resolution Visual Understanding for Vision-Language Models - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/0a0675d2-6b05-4e54-9e7e-c422795697a2/image+%281%29.png</image:loc>
      <image:title>Tech Blog - ERGO: Efficient High-Resolution Visual Understanding for Vision-Language Models - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/78887dc9-b35e-4ca0-84da-a4b82c2cdd1c/image+%282%29.png</image:loc>
      <image:title>Tech Blog - ERGO: Efficient High-Resolution Visual Understanding for Vision-Language Models - 돋보이게 만드세요</image:title>
      <image:caption>무엇이든, 온라인에서 스토리를 전달하는 방식이 큰 차이를 만들어낼 수 있습니다.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/where-do-llms-encode-the-knowledge-to-assess-the-ambiguity-6zjbl-tmjns-l25lf-grh8b</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2026-03-31</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/c4f6fbb6-5fa7-427f-a860-cfc8a9edc12a/image+%2811%29.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Seungmin Yang EdgeFM Lead, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/1888fd76-47e3-4e65-8751-7d5c6631d78f/%EC%8A%A4%ED%81%AC%EB%A6%B0%EC%83%B7+2025-10-21+170927.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/9b5fee13-ec86-4bba-b2c8-7e5b99802ad0/%ED%91%9C+01.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/e7f65ef8-91f1-4828-8bb1-dbcc3e042f6e/%ED%91%9C+02.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/12fcd9e8-23bb-4c66-b759-9d79691ce0e1/%ED%91%9C3.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/559e5d49-0862-4e88-a602-f7365490018d/%ED%91%9C4.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/9bf5d214-8f7a-4ca1-b5a0-adb270aa1f61/%ED%91%9C+05.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/b79a8a65-31db-43f8-8c66-fa44f3ae4c11/TTFT+01.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>TTFT-Batch size graph with fixed input token length (1024). The figure on the left side is results on A100 and right side is that on RTX PRO 6000.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/5880dbef-5aa9-423c-ba94-863ae1e4a4a1/TTFT+02.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>TTFT-Input Token Length graph with fixed batch size (16). The figure on the left side is results on A100 and right side is that on RTX PRO 6000.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/428d9070-9304-4186-a9e2-d460886d5f03/TPOT1.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>TPOT-Batch size graph with fixed input token length (1024). The figure on the left side is results on A100 and right side is that on RTX PRO 6000.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/f0bebcef-cdb6-4d40-97dd-544ae199a44f/TPOT2.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>TPOT-Input Token Length graph with fixed batch size (16). The figure on the left side is results on A100 and right side is that on RTX PRO 6000.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/7b689b91-56d1-440e-af8b-0d9792034deb/%EA%B7%B8%EB%A6%BC3.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Throughput-Batch size graph with fixed input token length (1024). Solid lines indicate best cases for RTX PRO 6000 and dot lines means best cases for A100.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/5ff8b7e2-623c-4103-b59e-fe0628e562bb/%EA%B7%B8%EB%A6%BC4.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Throughput-Input Token Length graph with fixed batch size (16). Solid lines indicate best cases for RTX PRO 6000 and dot lines means best cases for A100.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/96b2fb36-675c-404c-af2e-6e5ada33869c/benchmark_winogrande.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/e08b9db4-7c7e-473e-9546-ba64e7e13de2/benchmark_piqa.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/3afb834c-7f28-4f1b-8f51-edcdc1606288/benchmark_mmlu_5_shot.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/0270468a-7132-4cbd-8d69-5d39a4fb24cb/benchmark_mmlu_0_shot.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/f1eecd4d-dafb-4751-a375-7494d3d1fbec/benchmark_hellaswag.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/where-do-llms-encode-the-knowledge-to-assess-the-ambiguity-6zjbl-tmjns-l25lf</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2025-11-05</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/c4f6fbb6-5fa7-427f-a860-cfc8a9edc12a/image+%2811%29.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Seungmin Yang EdgeFM Lead, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/1888fd76-47e3-4e65-8751-7d5c6631d78f/%EC%8A%A4%ED%81%AC%EB%A6%B0%EC%83%B7+2025-10-21+170927.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/9b5fee13-ec86-4bba-b2c8-7e5b99802ad0/%ED%91%9C+01.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/e7f65ef8-91f1-4828-8bb1-dbcc3e042f6e/%ED%91%9C+02.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/12fcd9e8-23bb-4c66-b759-9d79691ce0e1/%ED%91%9C3.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/559e5d49-0862-4e88-a602-f7365490018d/%ED%91%9C4.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/9bf5d214-8f7a-4ca1-b5a0-adb270aa1f61/%ED%91%9C+05.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/b79a8a65-31db-43f8-8c66-fa44f3ae4c11/TTFT+01.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>TTFT-Batch size graph with fixed input token length (1024). The figure on the left side is results on A100 and right side is that on RTX PRO 6000.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/5880dbef-5aa9-423c-ba94-863ae1e4a4a1/TTFT+02.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>TTFT-Input Token Length graph with fixed batch size (16). The figure on the left side is results on A100 and right side is that on RTX PRO 6000.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/428d9070-9304-4186-a9e2-d460886d5f03/TPOT1.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>TPOT-Batch size graph with fixed input token length (1024). The figure on the left side is results on A100 and right side is that on RTX PRO 6000.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/f0bebcef-cdb6-4d40-97dd-544ae199a44f/TPOT2.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>TPOT-Input Token Length graph with fixed batch size (16). The figure on the left side is results on A100 and right side is that on RTX PRO 6000.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/7b689b91-56d1-440e-af8b-0d9792034deb/%EA%B7%B8%EB%A6%BC3.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Throughput-Batch size graph with fixed input token length (1024). Solid lines indicate best cases for RTX PRO 6000 and dot lines means best cases for A100.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/5ff8b7e2-623c-4103-b59e-fe0628e562bb/%EA%B7%B8%EB%A6%BC4.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Throughput-Input Token Length graph with fixed batch size (16). Solid lines indicate best cases for RTX PRO 6000 and dot lines means best cases for A100.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/96b2fb36-675c-404c-af2e-6e5ada33869c/benchmark_winogrande.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/e08b9db4-7c7e-473e-9546-ba64e7e13de2/benchmark_piqa.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/3afb834c-7f28-4f1b-8f51-edcdc1606288/benchmark_mmlu_5_shot.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/0270468a-7132-4cbd-8d69-5d39a4fb24cb/benchmark_mmlu_0_shot.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/f1eecd4d-dafb-4751-a375-7494d3d1fbec/benchmark_hellaswag.png</image:loc>
      <image:title>Tech Blog - NVIDIA Blackwell; The Impact of NVFP4 For LLM Inference - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/where-do-llms-encode-the-knowledge-to-assess-the-ambiguity-6zjbl-tmjns</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2026-03-31</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/baa47359-40e7-49b0-b373-d92f9ec090dd/Marcel.png</image:loc>
      <image:title>Tech Blog - Video Self-Distillation for Single-Image Encoders: Learning Temporal Priors from Unlabeled Video - Marcel Simon, Ph. D. ML Researcher, Nota AI GmbH</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/50808d6e-390c-4e10-99db-6f3fcde7f02d/cto.jpg</image:loc>
      <image:title>Tech Blog - Video Self-Distillation for Single-Image Encoders: Learning Temporal Priors from Unlabeled Video - Tae-Ho Kim CTO &amp; Co-Founder, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/7a975d0b-635d-415b-bea3-c67794551042/Seul-ki.jpg</image:loc>
      <image:title>Tech Blog - Video Self-Distillation for Single-Image Encoders: Learning Temporal Priors from Unlabeled Video - Seul-Ki Yeom, Ph. D. Research Lead, Nota AI GmbH</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/e84c3601-2c14-4124-bc86-a04badf50afb/1.png</image:loc>
      <image:title>Tech Blog - Video Self-Distillation for Single-Image Encoders: Learning Temporal Priors from Unlabeled Video - Make it stand out</image:title>
      <image:caption>Figure 1: Training Overview</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/45b37f43-4035-48ec-afd4-22b96f76da5b/2.png</image:loc>
      <image:title>Tech Blog - Video Self-Distillation for Single-Image Encoders: Learning Temporal Priors from Unlabeled Video - Make it stand out</image:title>
      <image:caption>Table 1. Semantic segmentation results on ADE20K</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/bc4387b0-e6b0-426a-b88d-cd6fb03f22bf/3.png</image:loc>
      <image:title>Tech Blog - Video Self-Distillation for Single-Image Encoders: Learning Temporal Priors from Unlabeled Video - Make it stand out</image:title>
      <image:caption>Table 2. Object detection results on COCO</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/02cf2a12-bc9f-47c4-a9e4-29035e1ea943/4.png</image:loc>
      <image:title>Tech Blog - Video Self-Distillation for Single-Image Encoders: Learning Temporal Priors from Unlabeled Video - Make it stand out</image:title>
      <image:caption>Figure 2. Effect of prediction stride ∆ on ADE20K fast-linear accuracy</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/uniform-a-reuse-attention-mechanism-for-efficient-transformers-on-resource-constrained-edge-devices-p98l9</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2025-08-25</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/1240d30c-91fb-4846-8dcb-99f2ced4dbbe/Group+40158.png</image:loc>
      <image:title>Tech Blog - SplitQuant: Layer Splitting for Low-Bit Neural Network Quantization for Edge AI Devices - Jaewoo Song Software Engineer, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/711de431-c12e-4a97-afc1-0eb41fd81ea0/%EC%9E%AC%EC%9A%B0%EB%8B%98+%ED%85%8C%ED%81%AC%EB%B8%94%EB%A1%9C%EA%B7%B8+%EC%82%BD%EB%8F%84.jpeg</image:loc>
      <image:title>Tech Blog - SplitQuant: Layer Splitting for Low-Bit Neural Network Quantization for Edge AI Devices - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/2db09f6b-f464-4147-9e31-7a095ad78ba6/1.jpeg</image:loc>
      <image:title>Tech Blog - SplitQuant: Layer Splitting for Low-Bit Neural Network Quantization for Edge AI Devices - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/where-do-llms-encode-the-knowledge-to-assess-the-ambiguity-6zjbl-xzl9k</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2025-05-09</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/8876bcb3-57ae-442e-978c-27a2d26f4398/Group+40153.jpg</image:loc>
      <image:title>Tech Blog - Efficient LLaMA-3.2-Vision by Trimming Cross-attended Visual Features - Jewon Lee | Ki-Ung Song | Seungmin Yang | Donguk Lim | Jaeyeon Kim | Wooksu Shin | Bo-Kyeong Kim | Tae-Ho Kim EdgeFM Team, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/77a63af5-9521-4314-ac67-22e3e96917ce/yongjaelee-Nov2019.jpg</image:loc>
      <image:title>Tech Blog - Efficient LLaMA-3.2-Vision by Trimming Cross-attended Visual Features - Yong Jae Lee, Ph. D. Associate Professor, UW-Madison</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/875f49c6-3a52-4739-bbaf-085b75165ee0/2.png</image:loc>
      <image:title>Tech Blog - Efficient LLaMA-3.2-Vision by Trimming Cross-attended Visual Features - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/77726b7f-0490-4cec-b37a-27f601bf28e2/3.png</image:loc>
      <image:title>Tech Blog - Efficient LLaMA-3.2-Vision by Trimming Cross-attended Visual Features - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/14ecbd71-b993-4c2c-b20f-56e967174ac0/4.png</image:loc>
      <image:title>Tech Blog - Efficient LLaMA-3.2-Vision by Trimming Cross-attended Visual Features - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/9dd959c0-7e50-42b5-bfa7-47584def91ed/5.png</image:loc>
      <image:title>Tech Blog - Efficient LLaMA-3.2-Vision by Trimming Cross-attended Visual Features - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/uniform-a-reuse-attention-mechanism-for-efficient-transformers-on-resource-constrained-edge-devices</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2025-05-09</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/e3eaf409-8b8a-46b8-82cd-8f7ab6b5da1b/%E1%84%89%E1%85%B3%E1%86%AF%E1%84%80%E1%85%B5_%E1%84%89%E1%85%A1%E1%84%8C%E1%85%B5%E1%86%AB.jpeg</image:loc>
      <image:title>Tech Blog - UniForm: A Reuse Attention Mechanism for Efficient Transformers on Resource-Constrained Edge Devices - Seul-Ki Yeom, Ph. D. Research Lead, Nota AI GmbH</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/d2dcd933-3f45-4118-9088-2024ee379a71/CTO_Profile.jpeg</image:loc>
      <image:title>Tech Blog - UniForm: A Reuse Attention Mechanism for Efficient Transformers on Resource-Constrained Edge Devices - Tae-Ho Kim CTO &amp; Co-Founder, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/d54fc727-d8dc-4149-bb0d-c1007cae7bdc/Screenshot+2025-04-08+at+11.09.59+AM.png</image:loc>
      <image:title>Tech Blog - UniForm: A Reuse Attention Mechanism for Efficient Transformers on Resource-Constrained Edge Devices - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/9f356e3f-52ed-445a-95ed-52d5278eb6f3/Screenshot+2025-04-08+at+11.13.07+AM.png</image:loc>
      <image:title>Tech Blog - UniForm: A Reuse Attention Mechanism for Efficient Transformers on Resource-Constrained Edge Devices - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/where-do-llms-encode-the-knowledge-to-assess-the-ambiguity-6zjbl</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2025-05-09</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/8206d733-79d8-4aa7-8ca4-baf5d021704c/Performance_%EB%B0%95%ED%95%9C%EC%B2%A0.jpg</image:loc>
      <image:title>Tech Blog - A Study on Detecting LLM-Generated Multilingual Content - Hancheol Park, Ph. D. AI Research Engineer, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/878f52d7-b7d0-4ca2-aba0-482ee65c4291/Netspresso_%EA%B9%80%EA%B1%B4%EB%AF%BC_%EC%BB%AC%EB%9F%AC.jpg</image:loc>
      <image:title>Tech Blog - A Study on Detecting LLM-Generated Multilingual Content - Geonmin Kim, Ph. D. AI Research Engineer, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/ee338b10-6848-49d9-b17a-a3dca355fb95/jaeyeonkim.jpeg</image:loc>
      <image:title>Tech Blog - A Study on Detecting LLM-Generated Multilingual Content - Jaeyeon Kim AI Research Engineer, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/f465a87d-bf5f-4a47-99be-6490a38f41f8/image.png</image:loc>
      <image:title>Tech Blog - A Study on Detecting LLM-Generated Multilingual Content - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/a72fa71e-993b-4176-a569-0ec4724e5880/graph.png</image:loc>
      <image:title>Tech Blog - A Study on Detecting LLM-Generated Multilingual Content - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/94a2b04b-c356-4d61-b6b1-0d123f770718/results.png</image:loc>
      <image:title>Tech Blog - A Study on Detecting LLM-Generated Multilingual Content - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/where-do-llms-encode-the-knowledge-to-assess-the-ambiguity</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2025-05-09</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/8206d733-79d8-4aa7-8ca4-baf5d021704c/Performance_%EB%B0%95%ED%95%9C%EC%B2%A0.jpg</image:loc>
      <image:title>Tech Blog - Where do LLMs Encode the Knowledge to Assess the Ambiguity? - Hancheol Park, Ph. D. AI Research Engineer, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/878f52d7-b7d0-4ca2-aba0-482ee65c4291/Netspresso_%EA%B9%80%EA%B1%B4%EB%AF%BC_%EC%BB%AC%EB%9F%AC.jpg</image:loc>
      <image:title>Tech Blog - Where do LLMs Encode the Knowledge to Assess the Ambiguity? - Geonmin Kim, Ph. D. AI Research Engineer, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/3cef3786-3ac7-48f8-b2bd-3a50fc19deff/%EC%88%98%EC%A0%95%EA%B0%80%EB%8A%A5%ED%95%9C_%EC%9D%B4%EB%AF%B8%EC%A7%80.png</image:loc>
      <image:title>Tech Blog - Where do LLMs Encode the Knowledge to Assess the Ambiguity? - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/db076d9a-3345-43a3-a475-f0864bbc5990/image+%2824%29.png</image:loc>
      <image:title>Tech Blog - Where do LLMs Encode the Knowledge to Assess the Ambiguity? - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/7deee9ad-e6f6-42c9-ace3-5137e7c9a576/image+%2825%29.png</image:loc>
      <image:title>Tech Blog - Where do LLMs Encode the Knowledge to Assess the Ambiguity? - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/e2751f16-b768-4b8d-b512-a8c89c3608d4/image+%2826%29.png</image:loc>
      <image:title>Tech Blog - Where do LLMs Encode the Knowledge to Assess the Ambiguity? - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/39a3de92-65e9-4b8a-af20-158adeedc5d3/image+%2827%29.png</image:loc>
      <image:title>Tech Blog - Where do LLMs Encode the Knowledge to Assess the Ambiguity? - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/7fd8df3d-ca81-42d3-9984-27c3a3ef190a/image+%2828%29.png</image:loc>
      <image:title>Tech Blog - Where do LLMs Encode the Knowledge to Assess the Ambiguity? - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/deploying-an-efficient-vision-language-model-on-mobile-devices</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2025-05-09</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/f0de44bb-3b4c-49b4-8cef-1baa2f580d8f/NOTA_Netspresso_%EA%B9%80%EC%9E%AC%EC%97%B0_%EC%BB%AC%EB%9F%AC.jpg</image:loc>
      <image:title>Tech Blog - Deploying an Efficient Vision-Language Model on Mobile Devices - Jaeyeon Kim Research Engineer, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/878f52d7-b7d0-4ca2-aba0-482ee65c4291/Netspresso_%EA%B9%80%EA%B1%B4%EB%AF%BC_%EC%BB%AC%EB%9F%AC.jpg</image:loc>
      <image:title>Tech Blog - Deploying an Efficient Vision-Language Model on Mobile Devices - Geonmin Kim Research Engineer, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/8206d733-79d8-4aa7-8ca4-baf5d021704c/Performance_%EB%B0%95%ED%95%9C%EC%B2%A0.jpg</image:loc>
      <image:title>Tech Blog - Deploying an Efficient Vision-Language Model on Mobile Devices - Hancheol Park Team Lead of NetsPresso Application, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/683a2605-e97a-449d-916b-b13ec7847572/1+%EC%88%98%EC%A0%95.png</image:loc>
      <image:title>Tech Blog - Deploying an Efficient Vision-Language Model on Mobile Devices - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/aad112eb-ec74-44d1-b875-219dc2ae967c/2.png</image:loc>
      <image:title>Tech Blog - Deploying an Efficient Vision-Language Model on Mobile Devices - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/7bd754dc-92d1-4129-8e46-05bf4770dace/3.png</image:loc>
      <image:title>Tech Blog - Deploying an Efficient Vision-Language Model on Mobile Devices - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/e8460a41-8d93-4efb-b005-cba7578a67c3/240723_table1_img.png</image:loc>
      <image:title>Tech Blog - Deploying an Efficient Vision-Language Model on Mobile Devices - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/cluster-self-refinement-for-enhanced-online-multi-camera-people-tracking</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2025-05-09</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/7aade0af-b23d-4427-be09-1af88ae7c11d/Performance_%EA%B9%80%EC%A0%95%ED%98%B8.jpg</image:loc>
      <image:title>Tech Blog - Cluster Self-Refinement for Enhanced Online Multi-Camera People Tracking - Jeongho Kim Research Engineer, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/09b301e1-0584-4d14-8a02-de3c23a055d0/1.png</image:loc>
      <image:title>Tech Blog - Cluster Self-Refinement for Enhanced Online Multi-Camera People Tracking - Make it stand out</image:title>
      <image:caption>Figure 1. This is an example of Multi-Camera People Tracking. It involves tracking people across various cameras by mapping them to the same identities. The image in the center depicts a 2D map of the location, showing the estimated positions of people as captured by the cameras. The numbers provided represent their global IDs.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/82f42f03-340a-4fb6-ae90-e5c0e4783dbe/2.png</image:loc>
      <image:title>Tech Blog - Cluster Self-Refinement for Enhanced Online Multi-Camera People Tracking - Make it stand out</image:title>
      <image:caption>Figure 2. Overview of our system’s architecture.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/690edbf5-726a-4b06-85ed-6f7f6ec7b5bf/3.png</image:loc>
      <image:title>Tech Blog - Cluster Self-Refinement for Enhanced Online Multi-Camera People Tracking - Make it stand out</image:title>
      <image:caption>Figure 3. Overview of Cluster Self-Refinement. The left side depicts the refinement of appearance features, utilizing agglomerative clustering to check if different people are stored and, if correct, refine the appearance features in the cluster tracklet. The right side illustrates overlapped cluster refinement, addressing situations where one person has more than one global ID. The CSR procedure is carried out at regular intervals, as denoted by the red circle shown above.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/b2566f82-70c9-4e9d-90e1-974de704709a/4.png</image:loc>
      <image:title>Tech Blog - Cluster Self-Refinement for Enhanced Online Multi-Camera People Tracking - Make it stand out</image:title>
      <image:caption>Table 1. The results of the ablation study on using a CSR and EUP. CSR and EUP stand for Cluster Self-Refinement and Enhanced Utilizing Pose estimation, respectively.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/0c10e911-1c0c-4b57-866a-4453cf04eecf/5.png</image:loc>
      <image:title>Tech Blog - Cluster Self-Refinement for Enhanced Online Multi-Camera People Tracking - Make it stand out</image:title>
      <image:caption>Table 2. Public leaderboard for the Challenge Track 1</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/road-object-detection-robust-to-distorted-objects-at-the-edge-regions-of-images</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2025-05-09</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/233935b8-4fd5-4935-93f9-decbd8dd3d77/NOTA_Performance_%EC%8B%A0%EC%9A%B1%EC%88%98.jpg</image:loc>
      <image:title>Tech Blog - Road Object Detection Robust to Distorted Objects at the Edge Regions of images - Wooksu Shin Research Engineer, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/a0e5df95-d693-4338-bc7f-1a6afb4d2ab5/1.png</image:loc>
      <image:title>Tech Blog - Road Object Detection Robust to Distorted Objects at the Edge Regions of images - Make it stand out</image:title>
      <image:caption>Figure 1. An example of sliced inference. The region corresponding to the red box in the above image is resized to match the model's input size (as shown in the bottom image) before being inputted. As a result, the small objects within the yellow box are greatly enlarged, allowing the model to detect the target objects more accurately.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/b5526cdf-279a-4cbf-a7b3-e5f0fc973af5/2.png</image:loc>
      <image:title>Tech Blog - Road Object Detection Robust to Distorted Objects at the Edge Regions of images - Make it stand out</image:title>
      <image:caption>Figure 2. (a) depicts a mis-prediction of a distorted non-target object. A street sign is distorted, creating visual similarity with the outline of a car, detectors occasionally mis-predict it as a car. After training on these non-target objects, the issue of incorrect predictions is resolved as shown in (b).</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/346edbe7-303c-4633-b34c-d1233af39a57/3.png</image:loc>
      <image:title>Tech Blog - Road Object Detection Robust to Distorted Objects at the Edge Regions of images - Make it stand out</image:title>
      <image:caption>Figure 3. Changes in pixel distribution with histogram equalization</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/f70f4b6d-5019-46a9-8d8a-d6dc07b422c5/4.png</image:loc>
      <image:title>Tech Blog - Road Object Detection Robust to Distorted Objects at the Edge Regions of images - Make it stand out</image:title>
      <image:caption>Table 1. Ensembled detectors that we use in this work. Swin-L (Liu et al., 2021), ViT-L (Dosovitskiy et al., 2021) indicate backbones of DETR models, DINO (Zhang et al., 2023) is an architecture of DETR series. Co-DINO (Swin-L) was pretrained with Objects365 (Shao et al., 2019) and COCO (Lin et al., 2014) datasets. Co-DINO (ViT-L) was pretrained with Objects365 and LVIS. All models are fine-tuned with FishEye8K (Gochoo et al., 2023) dataset in this work.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/d29514b0-4001-4a4f-818d-b543d5b4bed1/5.png</image:loc>
      <image:title>Tech Blog - Road Object Detection Robust to Distorted Objects at the Edge Regions of images - Make it stand out</image:title>
      <image:caption>Figure 4. Examples of weighted boxes fusion (WBF). The above two images show bounding boxes predicted by different models. In the image below, these bounding boxes are combined into a single box using WBF.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/de733fdd-b002-45d6-9556-413a3fb03413/6.png</image:loc>
      <image:title>Tech Blog - Road Object Detection Robust to Distorted Objects at the Edge Regions of images - Make it stand out</image:title>
      <image:caption>Table 2. The results of ablation study on using sliced inference and semi-supervision, respectively</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/e56512f6-2225-445e-b5d9-df6b7490e577/7.png</image:loc>
      <image:title>Tech Blog - Road Object Detection Robust to Distorted Objects at the Edge Regions of images - Make it stand out</image:title>
      <image:caption>Table 3. Public Top 10 leaderboard for the Challenge Track 4</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/edgefusion-on-device-text-to-image-generation</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2025-05-09</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/b83ae155-08fb-4370-8431-78340d910a07/%EB%B0%95%ED%83%9C%EC%9E%84%EB%8B%98.jpg</image:loc>
      <image:title>Tech Blog - EdgeFusion: On-device Text-to-Image Generation - Tairen Piao Research Engineer, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/86287ad9-7afc-43f7-8cb2-f8ab85e7ab67/Edge+Fusion_Figure+1.jpg</image:loc>
      <image:title>Tech Blog - EdgeFusion: On-device Text-to-Image Generation - Make it stand out</image:title>
      <image:caption>Figure 1. T2I generation results. When trained with improved data, our EdgeFusion can produce high-quality images from challenging prompts in just a few denoising steps</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/6fae71b8-cc34-4335-954a-b033e5c47cb1/Edge+Fusion_Figure+2.jpg</image:loc>
      <image:title>Tech Blog - EdgeFusion: On-device Text-to-Image Generation - Make it stand out</image:title>
      <image:caption>Figure 2. A compact SD with step reduction. (a) Vanilla application of LCM: we initialize BK-LCM-Tiny with the weight from BK-SDM-Tiny and train with distillation to reduce sampling steps. (b) Our approach: improving the initialization of the LCM’s student with a better teacher is beneficial. Moreover, in the LCM training phase, employing the original teacher enhances performance. Leveraging high-quality data is crucial in both phases.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/bc718412-6b0c-46c5-83fe-29520d76fc16/Edge+Fusion_Figure+3.jpg</image:loc>
      <image:title>Tech Blog - EdgeFusion: On-device Text-to-Image Generation - Make it stand out</image:title>
      <image:caption>Table 1. Results of the BK-SDM-Tiny architecture trained on different datasets. Evaluated on COCO dataset with 25 steps.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/5952a859-a2bc-4e31-8ee8-7040e8f6657a/Edge+Fusion_Figure+4.jpg</image:loc>
      <image:title>Tech Blog - EdgeFusion: On-device Text-to-Image Generation - Make it stand out</image:title>
      <image:caption>Table 2. Human preference evaluation. The win rate of our models against the same architecture without improved data and without student finetuning is reported (1500 comparisons, 21 participants).</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/2f6d0cd9-0edd-4c7f-8c8d-bd76db228c95/Edge+Fusion_Figure+5.jpg</image:loc>
      <image:title>Tech Blog - EdgeFusion: On-device Text-to-Image Generation - Make it stand out</image:title>
      <image:caption>Figure 3. Comparison between FP32 and W8A16 quantized EdgeFusion (2 steps)</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/18af17c9-4e00-43bd-ad72-526e8f8e651b/Edge+Fusion_Figure+6.jpg</image:loc>
      <image:title>Tech Blog - EdgeFusion: On-device Text-to-Image Generation - Make it stand out</image:title>
      <image:caption>Table 3. Inference time reduction using MLT. For the cross attention block, the relative time ratio is computed by comparing operations without MLT to those with MLT.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/2af4b4f8-9948-455d-bb76-77919ac85ec2/Edge+Fusion_Figure+7.png</image:loc>
      <image:title>Tech Blog - EdgeFusion: On-device Text-to-Image Generation - Make it stand out</image:title>
      <image:caption>Table 4. Benchmark on Exynos 2400 with and without MLT.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/ld-pruner-efficient-pruning-of-latent-diffusion-models-using-task-agnostic-insights</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2025-05-09</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/b4c73e6e-93a2-45fa-b810-3a5502a91744/Thibault.png</image:loc>
      <image:title>Tech Blog - LD-Pruner: Efficient Pruning of Latent Diffusion Models using Task-Agnostic Insights - Thibault Castells Research Engineer, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/9b11c711-7724-4dac-b7b7-b032e44bec03/Screenshot+2024-04-11+at+3.07.29%E2%80%AFPM.png</image:loc>
      <image:title>Tech Blog - LD-Pruner: Efficient Pruning of Latent Diffusion Models using Task-Agnostic Insights - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/60f2b663-1ab8-4e1c-9c4a-dc0b08cc1af1/Screenshot+2024-04-11+at+3.45.38%E2%80%AFPM.png</image:loc>
      <image:title>Tech Blog - LD-Pruner: Efficient Pruning of Latent Diffusion Models using Task-Agnostic Insights - Make it stand out</image:title>
      <image:caption>Table 1: Comparison of different models for T2I Generation, on the MS-COCO 256 X 256 validation set. Speedup values are measured relative to SD-v1.4.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/23e98b11-69f9-4332-9368-1eaf25039aac/Screenshot+2024-04-11+at+3.44.30%E2%80%AFPM.png</image:loc>
      <image:title>Tech Blog - LD-Pruner: Efficient Pruning of Latent Diffusion Models using Task-Agnostic Insights - Make it stand out</image:title>
      <image:caption>Figure 2: Qualitative comparison on zero-shot MS-COCO benchmark on T2I. The results of previous studies were obtained with their official released models.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/556368ef-2053-4af4-82cd-c094a33351fa/Screenshot+2024-04-11+at+3.49.52%E2%80%AFPM.png</image:loc>
      <image:title>Tech Blog - LD-Pruner: Efficient Pruning of Latent Diffusion Models using Task-Agnostic Insights - Make it stand out</image:title>
      <image:caption>Figure 3: Evolution of the FID during the training process for the UIG task on the CelebA-HQ 256 X 256 dataset, for two different compression ratios.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/5f8206ae-7be1-4c61-90da-ce1ecfa67156/Screenshot+2024-04-11+at+3.54.54%E2%80%AFPM.png</image:loc>
      <image:title>Tech Blog - LD-Pruner: Efficient Pruning of Latent Diffusion Models using Task-Agnostic Insights - Make it stand out</image:title>
      <image:caption>Table 2: Compression performance on UAG task with AudioDiffusion. When finetuning, we proceed for 12k steps.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/62003489-84c8-4cfc-a409-6ee594306053/Screenshot+2024-04-11+at+4.01.54%E2%80%AFPM.png</image:loc>
      <image:title>Tech Blog - LD-Pruner: Efficient Pruning of Latent Diffusion Models using Task-Agnostic Insights - Make it stand out</image:title>
      <image:caption>Table 3: FID scores for our compressed model (31 operators modified) trained from scratch and with preserved pre-training weights, for UIG on CelebA-HQ 256 X 256. In both cases, the exact same training is applied. The FID for the original model is 13.85.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/shortened-llm-a-simple-depth-pruning-for-large-language-models</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2025-05-09</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/9f76ea45-b972-4d3d-a06e-258ab5b32493/%EB%B3%B4%EA%B2%BD%EB%8B%98.png</image:loc>
      <image:title>Tech Blog - Shortened LLM: A Simple Depth Pruning for Large Language Models - Bo-Kyeong Kim Senior Researcher, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/a1002d7c-f4ac-4826-991d-2dd903ad9902/image+%284%29.png</image:loc>
      <image:title>Tech Blog - Shortened LLM: A Simple Depth Pruning for Large Language Models - Make it stand out</image:title>
      <image:caption>Figure 1. (a) Comparison of pruning units. Width pruning reduces the size of projection weight matrices. Depth pruning removes Transformer blocks, or individual MHA and FFN modules. (b) Efficiency of pruned LLaMA-7B models on an NVIDIA H100 GPU. Compared to width pruning by FLAP and LLM-Pruner, our depth pruning achieves faster inference with competitive PPL on WikiText-2 (left) and offers a better latency-throughput trade-off (right; M: batch size).</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/1bdfc777-b8fa-44a2-b064-8afe01092441/image+%285%29.png</image:loc>
      <image:title>Tech Blog - Shortened LLM: A Simple Depth Pruning for Large Language Models - Make it stand out</image:title>
      <image:caption>Figure 2. Our depth pruning approach. After identifying unimportant blocks with straightforward metrics, we perform one-shot pruning followed by light retraining. Right LoRA figure was sourced from Hu et al. [2022].</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/2546359a-5dbb-4a59-806f-a4a3fb2a045f/pdf_fig_ppl_bookcorpus_v1.png</image:loc>
      <image:title>Tech Blog - Shortened LLM: A Simple Depth Pruning for Large Language Models - Make it stand out</image:title>
      <image:caption>Figure 3. Estimated importance of each Transformer block on the calibration set. Blocks with lower PPL scores are pruned.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/6bf1f300-42cc-4874-b5ce-3f2d35183cfe/image+%286%29.png</image:loc>
      <image:title>Tech Blog - Shortened LLM: A Simple Depth Pruning for Large Language Models - Make it stand out</image:title>
      <image:caption>Figure 4. Results of pruned LLaMA-1-7B [Touvron et al., 2023] and Vicuna-v1.3-13B [Chiang et al., 2023]. The width pruning methods of Wanda-sp [Sun et al., 2024; An et al., 2024], FLAP [An et al., 2024], and LLM-Pruner [Ma et al., 2023] often degrade inference efficiency. In contrast, our depth pruning approach enhances generation speed and competes well in zero-shot task performance.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/0d2fd7af-5c53-4278-8048-48a4cef6605d/image+%287%29.png</image:loc>
      <image:title>Tech Blog - Shortened LLM: A Simple Depth Pruning for Large Language Models - Make it stand out</image:title>
      <image:caption>Figure 5. Generation examples. Given an input prompt about 'AI can create a logo in seconds,' the pruned models generate outputs that are similar to those of the original models.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/integrating-launchx-with-nvidia-tao-toolkit-for-running-on-various-edge-devices</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2025-05-09</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/c8fc65d5-3d00-46c3-ac96-745fff82262b/%E1%84%83%E1%85%A2%E1%84%8C%E1%85%B5+1%402x.jpg</image:loc>
      <image:title>Tech Blog - Integrating LaunchX with NVIDIA TAO Toolkit for Running on Various Edge Devices - Hoin Na CoS Tech Part Manager, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/b7c5db47-8138-43c3-9775-fd657ad6b01c/Screenshot_2023-11-14_at_4.55.44.png</image:loc>
      <image:title>Tech Blog - Integrating LaunchX with NVIDIA TAO Toolkit for Running on Various Edge Devices - Make it stand out</image:title>
      <image:caption>Figure 1. Flow of NVIDIA TAO Toolkit (TAO Toolkit | NVIDIA Developer)</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/2e51dafa-636a-473e-91c7-b7374dfc8997/Screenshot_2023-11-14_at_7.26.14.png</image:loc>
      <image:title>Tech Blog - Integrating LaunchX with NVIDIA TAO Toolkit for Running on Various Edge Devices - Make it stand out</image:title>
      <image:caption>Figure 2. LaunchX</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/15835f8a-f236-4088-afab-94cc5df436d8/Screenshot_2023-11-14_at_8.21.20.png</image:loc>
      <image:title>Tech Blog - Integrating LaunchX with NVIDIA TAO Toolkit for Running on Various Edge Devices - Make it stand out</image:title>
      <image:caption>Figure 3. Multiple benchmarks of MobilenetV2(TF1) model by LaunchX</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/bdcf7b01-00c4-480b-9ca4-c3907b38f0b0/%EC%8A%A4%ED%81%AC%EB%A6%B0%EC%83%B7+2023-11-14+165817.png</image:loc>
      <image:title>Tech Blog - Integrating LaunchX with NVIDIA TAO Toolkit for Running on Various Edge Devices - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/how-netspresso-turbocharges-semantic-segmentation-models</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2025-05-09</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/faff29dd-4db6-4157-9506-a1a375685ef3/YoonJae+Yang.gif</image:loc>
      <image:title>Tech Blog - Revolutionizing Mobile AI: How NetsPresso® Turbocharges Semantic Segmentation Models for Real-Time Performance - YoonJae Yang AI Application Developer, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/cc09558c-263c-45dc-b8d1-6e9f239b8e18/Netspresso_%E1%84%8B%E1%85%B5%E1%84%92%E1%85%A7%E1%86%BC%E1%84%8C%E1%85%AE%E1%86%AB_%E1%84%8F%E1%85%A5%E1%86%AF%E1%84%85%E1%85%A5.jpg</image:loc>
      <image:title>Tech Blog - Revolutionizing Mobile AI: How NetsPresso® Turbocharges Semantic Segmentation Models for Real-Time Performance - Hyungjun Lee Research Engineer, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/f4a6c693-4f7f-48e6-8d17-29ae2ac71229/Article-4_table1.png</image:loc>
      <image:title>Tech Blog - Revolutionizing Mobile AI: How NetsPresso® Turbocharges Semantic Segmentation Models for Real-Time Performance - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/cb7d4195-014e-4092-9b51-76d8b93c71d3/Article-4_table2.png</image:loc>
      <image:title>Tech Blog - Revolutionizing Mobile AI: How NetsPresso® Turbocharges Semantic Segmentation Models for Real-Time Performance - Make it stand out</image:title>
      <image:caption>These results underline the significant improvement in latency achieved by compressing the PIDNet model using NetsPresso®'s structured pruning. This optimization makes it a more viable choice for real-time applications, such as video conferencing and video calls, on mobile devices. In conclusion, our journey into mobile optimization has shown that with the right tools and techniques, even the most computationally demanding AI models can be made efficient for mobile platforms. This opens up exciting possibilities for applications that require real-time, on-device AI processing.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/empowering-pedestrian-safety-the-obstacle-detection-app-and-ai-model-optimization</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2025-05-07</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/faff29dd-4db6-4157-9506-a1a375685ef3/YoonJae+Yang.gif</image:loc>
      <image:title>Tech Blog - Empowering Pedestrian Safety: The Obstacle Detection App and AI Model Optimization - Yoonjae Yang AI Application Developer, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/18c4a069-8c01-4518-9bb3-3f6c343e1cc2/%EB%8C%80%EC%A7%80+1%403x.png</image:loc>
      <image:title>Tech Blog - Empowering Pedestrian Safety: The Obstacle Detection App and AI Model Optimization - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/revolutionizing-laundry-symbol-detection-with-ai-model-optimization-streamlining-the-process-for-precise-results</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2025-05-07</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/a1a9712f-5b67-4cbd-8b15-4b68fd9684eb/%EB%AC%B4%EC%A0%9C-1.gif</image:loc>
      <image:title>Tech Blog - Revolutionizing Laundry Symbol Detection with AI Model Optimization: Streamlining the Process for Precise Results - Yoonjae Yang AI Application Developer, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/7bf478b3-e3cd-473b-8150-81bea46b3fca/Template_Community_Usecasetable_e5d686cc-54ef-4025-b6fb-e0471967f968.png</image:loc>
      <image:title>Tech Blog - Revolutionizing Laundry Symbol Detection with AI Model Optimization: Streamlining the Process for Precise Results - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/developing-a-pothole-detection-application-for-safe-driving</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2025-05-09</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/c3c78916-c127-4518-b7c2-4dd1c59c129d/%EB%8C%80%EC%A7%80+1%403x.jpg</image:loc>
      <image:title>Tech Blog - Advancing Road Safety with AI-Powered Pothole Detection: Performance Enhancements for AI Vision Developers and Engineers - YoonJae Yang AI Application Developer, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/fd5cbc44-a9f4-446c-8f69-db806d38bbd9/Netspresso_%EC%9D%B4%ED%98%95%EC%A4%80_%EC%BB%AC%EB%9F%AC.jpg</image:loc>
      <image:title>Tech Blog - Advancing Road Safety with AI-Powered Pothole Detection: Performance Enhancements for AI Vision Developers and Engineers - Hyungjun Lee Research Engineer, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/63626450-542f-4b0c-af4a-1c0ceaf9cadc/Pothole_table+1.png</image:loc>
      <image:title>Tech Blog - Advancing Road Safety with AI-Powered Pothole Detection: Performance Enhancements for AI Vision Developers and Engineers - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/f9c99777-d196-4f68-b278-b96954d7e275/Pothole_table+2.png</image:loc>
      <image:title>Tech Blog - Advancing Road Safety with AI-Powered Pothole Detection: Performance Enhancements for AI Vision Developers and Engineers - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/community/enhancing-real-time-processing-of-yolov5-l-using-pruning-techniques-in-pynetspresso</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2025-05-09</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/7aeab08e-96a7-4bdc-8b34-f3040964a40d/Netspresso_%EC%9D%B4%ED%98%95%EC%A4%80_%EC%BB%AC%EB%9F%AC.jpg</image:loc>
      <image:title>Tech Blog - Enhancing Real-Time Processing of YOLOv5-L Using Pruning Techniques in PyNetsPresso - Hyungjun Lee Research Engineer, Nota AI</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/521968a5-f19d-4f8b-986c-385bf4a08f94/Community_1+%282%29.png</image:loc>
      <image:title>Tech Blog - Enhancing Real-Time Processing of YOLOv5-L Using Pruning Techniques in PyNetsPresso - Make it stand out</image:title>
      <image:caption>Figure 1: Achieving real-time FPS with minimal performance degradation using PyNetsPresso.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/91d22cc2-1797-4dd5-b53b-502c0df8b744/pr-latency.png</image:loc>
      <image:title>Tech Blog - Enhancing Real-Time Processing of YOLOv5-L Using Pruning Techniques in PyNetsPresso - Make it stand out</image:title>
      <image:caption>Figure 2-1: Pruning ratio and latency graph. Latency measured on Jetson Xavier.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/8d466d11-446c-4e9f-ba27-978a73be9449/pr-map.png</image:loc>
      <image:title>Tech Blog - Enhancing Real-Time Processing of YOLOv5-L Using Pruning Techniques in PyNetsPresso - Make it stand out</image:title>
      <image:caption>Figure 2-2: Pruning ratio and mAP graph.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/1fd387f5-0781-4009-9bbe-21589280daac/latency-map.png</image:loc>
      <image:title>Tech Blog - Enhancing Real-Time Processing of YOLOv5-L Using Pruning Techniques in PyNetsPresso - Make it stand out</image:title>
      <image:caption>Figure 2-3: Latency and mAP graph. Latency measured on Jetson Xavier.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/6264a472-c610-4ab9-804d-d8d67bb5ab4a/Summary+table.png</image:loc>
      <image:title>Tech Blog - Enhancing Real-Time Processing of YOLOv5-L Using Pruning Techniques in PyNetsPresso - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/0b77985f-2cc5-445a-aafd-0ea3671f627e/Table.png</image:loc>
      <image:title>Tech Blog - Enhancing Real-Time Processing of YOLOv5-L Using Pruning Techniques in PyNetsPresso - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/33bea7ed-6b66-451d-88bf-29ef1b2376ca/Community+article_5.png</image:loc>
      <image:title>Tech Blog - Enhancing Real-Time Processing of YOLOv5-L Using Pruning Techniques in PyNetsPresso - Make it stand out</image:title>
      <image:caption>Figure 3: Conventional pruning methods make it impossible to achieve acceleration since parameters cannot be completely eliminated.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/f601d527-9e7b-4727-9d4b-f68f9ba01efa/Figure+4.png</image:loc>
      <image:title>Tech Blog - Enhancing Real-Time Processing of YOLOv5-L Using Pruning Techniques in PyNetsPresso - Make it stand out</image:title>
      <image:caption>Figure 4: Dependency Aware Pruning creates a lightweight artificial neural network by completely removing parameters, taking into account the input/output relationships between each layer.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/04c16486-00cc-4361-90f0-ac3651200813/Figure+5+%282%29.png</image:loc>
      <image:title>Tech Blog - Enhancing Real-Time Processing of YOLOv5-L Using Pruning Techniques in PyNetsPresso - Make it stand out</image:title>
      <image:caption>Figure 5: The operation of Dependency Aware Pruning in Skip-Connections allows for the actual acceleration of artificial neural networks by tracking and eliminating dependencies.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/ad7a3e40-db2e-41a1-ae73-e640af78ab5b/Python_1.png</image:loc>
      <image:title>Tech Blog - Enhancing Real-Time Processing of YOLOv5-L Using Pruning Techniques in PyNetsPresso - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/e466f87a-d88b-4a66-b42d-45528b2ffbd4/Python_2.png</image:loc>
      <image:title>Tech Blog - Enhancing Real-Time Processing of YOLOv5-L Using Pruning Techniques in PyNetsPresso - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/9e4190d6-a095-4706-924e-d302d4841eaa/Python_3.png</image:loc>
      <image:title>Tech Blog - Enhancing Real-Time Processing of YOLOv5-L Using Pruning Techniques in PyNetsPresso - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/bf79d815-e2d4-4969-b0a3-ed860d17c291/Python_6.png</image:loc>
      <image:title>Tech Blog - Enhancing Real-Time Processing of YOLOv5-L Using Pruning Techniques in PyNetsPresso - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/c029ada0-17cc-4004-a448-6e2e7f3a6bee/Figure+6.png</image:loc>
      <image:title>Tech Blog - Enhancing Real-Time Processing of YOLOv5-L Using Pruning Techniques in PyNetsPresso - Make it stand out</image:title>
      <image:caption>Figure 6: Results saved in the output_path.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/94047329-13c6-464a-8a7e-2cd4a5b47668/Python_5.png</image:loc>
      <image:title>Tech Blog - Enhancing Real-Time Processing of YOLOv5-L Using Pruning Techniques in PyNetsPresso - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/1c17218e-03e8-4731-9faf-aa07d4980d29/Python+7.png</image:loc>
      <image:title>Tech Blog - Enhancing Real-Time Processing of YOLOv5-L Using Pruning Techniques in PyNetsPresso - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/ba1d250b-3ee9-4c46-921b-33f4e7869cac/pynp_main.png</image:loc>
      <image:title>Tech Blog - Enhancing Real-Time Processing of YOLOv5-L Using Pruning Techniques in PyNetsPresso - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/notice</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2025-12-26</lastmod>
  </url>
  <url>
    <loc>https://www.nota.ai/notice/2024-04-11</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2024-11-04</lastmod>
  </url>
  <url>
    <loc>https://www.nota.ai/notice/2024-03-18</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2024-11-04</lastmod>
  </url>
  <url>
    <loc>https://www.nota.ai/notice_contents-1</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2024-10-04</lastmod>
  </url>
  <url>
    <loc>https://www.nota.ai/notice_contents-1/Blog%20Post%20Title%20One-4cwmc</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2024-09-11</lastmod>
  </url>
  <url>
    <loc>https://www.nota.ai/notice_contents-1/blog-post-title-two-8jphe</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2020-05-20</lastmod>
  </url>
  <url>
    <loc>https://www.nota.ai/notice_contents-1/blog-post-title-three-fwlh3</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2020-05-20</lastmod>
  </url>
  <url>
    <loc>https://www.nota.ai/notice_contents-1/blog-post-title-four-8g89l</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2020-05-20</lastmod>
  </url>
  <url>
    <loc>https://www.nota.ai/news</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2025-12-29</lastmod>
  </url>
  <url>
    <loc>https://www.nota.ai/news/enpressrelease2512</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2025-12-30</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/b527dd7c-23a3-4344-a1f4-3ecffcd3c303/Nota+AI_PressRelease_Cover.png</image:loc>
      <image:title>newsroom - Nota AI to Supply AI Optimization Technology for Samsung Electronics’ Next-Generation Mobile AP ‘Exynos 2600’… Solidifying Its Position as a Leader in On-Device AI - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/news/enpressrelease2511</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2025-12-29</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/f4896863-4dba-4497-856b-6d941fe4a3ea/PR_S.LSI_Thumb_Dark_v1_251107+%282%29.png</image:loc>
      <image:title>newsroom - Nota AI Signs Technology Collaboration Agreement with Samsung Electronics for Exynos AI Optimization “Driving the Popularization of On-Device Generative AI” - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/contact-us</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2025-07-10</lastmod>
  </url>
  <url>
    <loc>https://www.nota.ai/nvidia-jetson</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2023-10-17</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/b7a85b3b-a73f-445c-982e-b33521544621/nvidia-preferred-partner-badge-rgb-for-screen.png</image:loc>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/b0e21b23-4d89-4ceb-8b0b-a1ba6f28c2a6/NP_platform.png</image:loc>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/d07c2ad6-1dd7-4bf9-9b34-f13ffb4df3e8/NP_endtoend_diagram_230314.png</image:loc>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/a474ffb6-68d0-434c-9028-412f9e310ef3/%E1%84%89%E1%85%B3%E1%84%8F%E1%85%B3%E1%84%85%E1%85%B5%E1%86%AB%E1%84%89%E1%85%A3%E1%86%BA+2023-03-17+%E1%84%8B%E1%85%A9%E1%84%92%E1%85%AE+3.10.27.png</image:loc>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/5807d791-c480-44e9-9604-aa5566b243bf/accuracy.png</image:loc>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/aa9d797c-6818-47a1-b2dc-c87c9103ca85/np_process.png</image:loc>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/d0c10cef-dce8-477d-83e5-ed6f29bc9145/07+Sub-module_MS%403x-8.png</image:loc>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/6457216c-a19a-49b0-92cf-c1e936f2ddef/Sub-module_MC.png</image:loc>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/0d460b76-c271-4bf9-8914-b119922666d8/07+Sub-module_ML.png</image:loc>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/partner-arm-avhethosu65</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2023-07-27</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/6633a563-bbf3-4ee4-b387-64557f0da443/Artboard+2%402x.png</image:loc>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/6cf75082-d378-454d-b5d2-ea907377d26b/Desktop+Vector+Mockup%403x.png</image:loc>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/028bc044-22ae-416e-a348-1bc94d1b1b53/table_latency%28ms%29.png</image:loc>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/6c476c17-22f4-4ca3-8fc0-ac10f0659e99/table_modelsize%28kb%29.png</image:loc>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/aboutus</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2026-03-31</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/a640bb9b-16a0-4c1e-8ace-ac45bb44937b/company-background-blue.png</image:loc>
      <image:title>About us</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/4749b74a-7cf5-475b-9c5e-60f7cb137ad5/%E1%84%82%E1%85%A9%E1%84%90%E1%85%A1_%E1%84%92%E1%85%AC%E1%84%8B%E1%85%B4%E1%84%89%E1%85%B5%E1%86%AF.JPG</image:loc>
      <image:title>About us</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/ebb720ab-61ed-444d-9d64-d6444d4e15f7/%E1%84%82%E1%85%A9%E1%84%90%E1%85%A1_%E1%84%8B%E1%85%B5%E1%86%AB%E1%84%86%E1%85%AE%E1%86%AF2.JPG</image:loc>
      <image:title>About us</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/139fa6ec-713a-4832-b7c8-19d122e47afa/%E1%84%82%E1%85%A9%E1%84%90%E1%85%A1_%E1%84%8B%E1%85%B5%E1%86%AB%E1%84%86%E1%85%AE%E1%86%AF9.JPG</image:loc>
      <image:title>About us</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/2f698fb7-d000-4eff-88f6-cad41b1cfeda/DSC09728.JPG</image:loc>
      <image:title>About us</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/5d9f6f04-887b-4be8-b1bd-be3638b3bc47/7C203058+%281%29.JPG</image:loc>
      <image:title>About us</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/16d96fe5-e885-42bb-9efc-cb60d4dfcb12/%ED%9A%8C%EC%9D%98%EC%8B%A4_CTO+%ED%9A%8C%EC%9D%98%EC%A4%91_.jpg</image:loc>
      <image:title>About us</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/27355782-9284-47ef-bb59-d0ec75e11404/%E1%84%82%E1%85%A9%E1%84%90%E1%85%A1_%E1%84%85%E1%85%A1%E1%84%8B%E1%85%AE%E1%86%AB%E1%84%8C%E1%85%B5.jpg</image:loc>
      <image:title>About us</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/3b50da95-ef32-4eee-bdaa-647185ea1db0/EverFocus_logo.png</image:loc>
      <image:title>About us</image:title>
      <image:caption>EverFocus logo with the text 'EverFocus' in stylized font, with a teal accent on the 'o' and a teal swoosh over the 'o'.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/4de0b046-da76-4a64-b0a1-fa7e0f42481d/partners_aws.png</image:loc>
      <image:title>About us</image:title>
      <image:caption>AWS Partner Network logo with the text 'aws partner network'.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/b181dca2-a244-49ad-85e3-90eb68c1db58/partners_DeuscheTelekom.png</image:loc>
      <image:title>About us</image:title>
      <image:caption>Deutsche Telekom logo in shades of pink and gray.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/383dedb3-4958-4fbe-a46c-2e2ec71cec29/ACROSSER_logo.png</image:loc>
      <image:title>About us</image:title>
      <image:caption>A logo with a stylized red arrow symbol followed by the word 'ACROSSER' in black uppercase letters.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/47ef6940-6976-498a-96b6-702038e1f45c/partners_AXIS.png</image:loc>
      <image:title>About us</image:title>
      <image:caption>AXIS Communications logo with black text and a yellow and red triangular design</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/e06e976e-7804-48bd-a5b0-5612549de0cc/partners_SKtelecom.png</image:loc>
      <image:title>About us</image:title>
      <image:caption>SK Telecom logo with stylized red, orange, and white butterfly icon</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/18b5baf1-d4c0-4379-b6c8-798abf2fb739/partners_arm.png</image:loc>
      <image:title>About us</image:title>
      <image:caption>The word 'arm' written in blue lowercase letters.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/14b939b6-75d3-45f0-a03d-1c06894176e2/partners_intel.png</image:loc>
      <image:title>About us</image:title>
      <image:caption>Intel logo with a blue lowercase 'i' and the word 'intel' in black.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/6a0965ff-121b-49b1-8d6d-049cf4489d32/partners_Telechips.png</image:loc>
      <image:title>About us</image:title>
      <image:caption>Telechips company logo in blue text on a white background.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/852294d6-5d0f-47b3-8dad-8b93b2280f4c/partners_Everocus.png</image:loc>
      <image:title>About us</image:title>
      <image:caption>EverFocus logo with the tagline 'Your Safety, Our Focus'</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/519ea45c-75d7-4100-9878-57c61ff453b2/partners_LGCNS.png</image:loc>
      <image:title>About us</image:title>
      <image:caption>LG CNS logo with red and gray branding on a white background.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/9f2a4a9a-4950-48a1-be5c-f69cf91d4496/partners_RENESAS.png</image:loc>
      <image:title>About us</image:title>
      <image:caption>Renesas logo on a white background</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/dd783095-5863-4b50-80a0-4a022853b099/partners_Mircrosoft.png</image:loc>
      <image:title>About us</image:title>
      <image:caption>Microsoft logo with four colored squares and the word 'Microsoft' next to it.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/e55c9fb3-ea45-413d-b13f-d3e6affdc099/partners_LGU%2B.png</image:loc>
      <image:title>About us</image:title>
      <image:caption>LG U+ logo with red circle and white smiley face, gray text LG U+</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/1666f9b1-94f2-41e3-be04-a99a59a2578e/partners_SONY.png</image:loc>
      <image:title>About us</image:title>
      <image:caption>Sony logo on a white background.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/6dbb54a3-cf9e-4bee-9681-77ff7b64c920/partners_NVIDIA.png</image:loc>
      <image:title>About us</image:title>
      <image:caption>NVIDIA logo with green eye symbol and black text on white background.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/131f0892-fcfe-4688-a7b6-6537b64ed1cc/partners_Qualcomm.png</image:loc>
      <image:title>About us</image:title>
      <image:caption>Qualcomm logo in blue text on a white background</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/b958933d-9cca-4882-9916-79e8c97a4f1f/partners_samsung.png</image:loc>
      <image:title>About us</image:title>
      <image:caption>Samsung logo in bold blue letters on a white background.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/24db228b-3f1f-4a94-9715-72030c8bc521/Main_partners_FuriosaAI.png</image:loc>
      <image:title>About us</image:title>
      <image:caption>FURIOSA logo with stylized red lightning bolt symbol on the right</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/33abb8bb-daa4-4903-af55-fa552c65220a/partners_SAMSUNGSDS.png</image:loc>
      <image:title>About us</image:title>
      <image:caption>Samsung SDS logo in blue text on a white background.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/e16204a4-b173-4f50-a5d0-495fc1508e46/Main_partners_Pytorch.png</image:loc>
      <image:title>About us</image:title>
      <image:caption>PyTorch logo with an abstract flame icon and the word 'PyTorch' next to it.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/528f06d1-23cd-43bf-9039-621ffce9e411/a.png</image:loc>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/newsroom</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2026-03-10</lastmod>
  </url>
  <url>
    <loc>https://www.nota.ai/netspresso</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2026-02-13</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/9e9dc223-a53d-4924-86c7-0f7c39af69c7/NetsPresso_Basic_Logo_Navy.png</image:loc>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/careers</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2025-07-14</lastmod>
  </url>
  <url>
    <loc>https://www.nota.ai/ai-solutions-notavisionagent</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2026-01-23</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/5f3a28dd-d68f-411c-bf61-5324b07a9876/%40keyvisual.png</image:loc>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/ai-solutions-industrialsafety</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2026-01-24</lastmod>
  </url>
  <url>
    <loc>https://www.nota.ai/ai-solutions-surveillance</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2025-08-05</lastmod>
  </url>
  <url>
    <loc>https://www.nota.ai/ai-solutions-its</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2025-08-04</lastmod>
  </url>
  <url>
    <loc>https://www.nota.ai/ai-solutions-dms-fr</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2025-08-04</lastmod>
  </url>
  <url>
    <loc>https://www.nota.ai/main</loc>
    <changefreq>daily</changefreq>
    <priority>1.0</priority>
    <lastmod>2026-03-31</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/61af5d2d7a632327e6590167/45324a2e-21c6-4bdf-9abc-ac44352d0da3/KakaoTalk_Photo_2024-11-28-22-17-02.png</image:loc>
    </image:image>
  </url>
  <url>
    <loc>https://www.nota.ai/nva-introduction</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2026-03-27</lastmod>
  </url>
  <url>
    <loc>https://www.nota.ai/ew2026</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2026-03-06</lastmod>
  </url>
</urlset>

