Caching is the single biggest performance lever. Next.js provides multiple caching layers. Here is how to use each one effectively.
Next.js Cache Layers
- Request Memoization — Deduplicates identical fetch calls in a single render pass
- Data Cache — Persists fetch results across requests on the server
- Full Route Cache — Caches rendered HTML and RSC payload at build time
- Router Cache — Client-side cache of visited routes
Fetch-Level Caching
// Each example gets its own name: the original declared `const data`
// four times in one scope, which is a redeclaration error when the
// snippets are combined in a single module.

// Cached indefinitely (default for static pages)
const productsStatic = await fetch("https://api.example.com/products", {
  cache: "force-cache",
});

// Revalidate every 60 seconds (ISR)
const productsIsr = await fetch("https://api.example.com/products", {
  next: { revalidate: 60 },
});

// Never cache (always fresh) — per-user data must not be shared
const cart = await fetch("https://api.example.com/cart", {
  cache: "no-store",
});

// Tag-based revalidation — invalidated later via revalidateTag("products")
const taggedProducts = await fetch("https://api.example.com/products", {
  next: { tags: ["products"] },
});
Route Segment Config
// app/(site)/blog/page.tsx
// Revalidate the entire page every 5 minutes (ISR).
export const revalidate = 300;

// Alternatively, force a rendering mode. These are mutually exclusive —
// the original exported `dynamic` twice, which is a duplicate-declaration
// error — so pick exactly ONE and remove `revalidate` if you use it:
// export const dynamic = "force-dynamic"; // always render per request (no caching)
// export const dynamic = "force-static";  // always render at build time
On-Demand Revalidation
// app/api/revalidate/route.ts
import { NextRequest, NextResponse } from "next/server";
import { revalidatePath, revalidateTag } from "next/cache";
/**
 * On-demand revalidation endpoint.
 *
 * Expects a JSON body with either `tag` (fine-grained, revalidates every
 * fetch tagged with it) or `path` (broader, revalidates a whole route).
 * Guarded by a shared secret in the `x-revalidation-secret` header.
 *
 * Returns 401 on a bad secret, 400 on a malformed body or when neither
 * field is provided.
 */
export async function POST(request: NextRequest) {
  const secret = request.headers.get("x-revalidation-secret");
  if (secret !== process.env.REVALIDATION_SECRET) {
    return NextResponse.json({ error: "Unauthorized" }, { status: 401 });
  }

  // request.json() throws on a missing or malformed body — answer with a
  // 400 instead of letting the exception surface as a 500.
  let body: unknown;
  try {
    body = await request.json();
  } catch {
    return NextResponse.json({ error: "Invalid JSON body" }, { status: 400 });
  }
  const { tag, path } = (body ?? {}) as { tag?: unknown; path?: unknown };

  // Revalidate by tag (fine-grained)
  if (typeof tag === "string" && tag.length > 0) {
    revalidateTag(tag);
    return NextResponse.json({ revalidated: true, tag });
  }
  // Revalidate by path (broader)
  if (typeof path === "string" && path.length > 0) {
    revalidatePath(path);
    return NextResponse.json({ revalidated: true, path });
  }
  return NextResponse.json({ error: "No tag or path provided" }, { status: 400 });
}
Redis Cache Layer
// lib/cache.ts
import { Redis } from "@upstash/redis";
// Shared Upstash Redis client; fromEnv() reads connection settings from
// environment variables (see @upstash/redis docs for the exact names).
const redis = Redis.fromEnv();
/** Options accepted by the `cached()` read-through helper below. */
interface CacheOptions {
  ttl?: number; // seconds; `cached()` defaults this to 3600 (1 hour)
  tags?: string[]; // tags this key is registered under, for invalidateByTag()
}
/**
 * Read-through cache: returns the value stored under `key`, or runs
 * `fetcher`, stores its result with a TTL, and returns it.
 *
 * @param key     Redis key for this entry.
 * @param fetcher Produces the value on a cache miss.
 * @param options TTL in seconds (default 3600) and optional tags.
 */
export async function cached<T>(
  key: string,
  fetcher: () => Promise<T>,
  options: CacheOptions = {}
): Promise<T> {
  const { ttl = 3600, tags } = options;

  // Cache hit — the @upstash/redis client deserializes JSON automatically.
  const hit = await redis.get<T>(key);
  if (hit !== null) {
    return hit;
  }

  // Miss — fetch fresh data.
  const data = await fetcher();

  // Store the raw value: the client JSON-serializes it itself. Passing
  // JSON.stringify(data) (as the original did) double-encodes, so a later
  // redis.get<T>() would hand back a string instead of T.
  await redis.set(key, data, { ex: ttl });

  // Register the key under each tag in a single round trip so
  // invalidateByTag() can find it later.
  if (tags && tags.length > 0) {
    const pipeline = redis.pipeline();
    for (const tag of tags) {
      pipeline.sadd(`tag:${tag}`, key);
    }
    await pipeline.exec();
  }

  return data;
}
/**
 * Delete every cached entry registered under `tag`, plus the tag's
 * membership set itself, in a single pipelined round trip.
 *
 * @returns the number of cache keys that were removed.
 */
export async function invalidateByTag(tag: string): Promise<number> {
  const tagSetKey = `tag:${tag}`;
  const members = await redis.smembers(tagSetKey);
  if (members.length === 0) {
    return 0;
  }

  const batch = redis.pipeline();
  members.forEach((member) => batch.del(member));
  batch.del(tagSetKey);
  await batch.exec();

  return members.length;
}
/** Remove a single cache entry by its exact Redis key. */
export async function invalidateByKey(key: string): Promise<void> {
  await redis.del(key);
}
Use Redis Cache in Server Components
// app/(site)/products/page.tsx
import { cached } from "@/lib/cache";
import { db } from "@/db";
/**
 * Product listing page. Loads the 50 newest products through the Redis
 * read-through cache: 5-minute TTL, invalidated via the "products" tag.
 */
export default async function ProductsPage() {
  // Named fetcher keeps the cached() call easy to scan.
  const loadProducts = async () =>
    db.query.products.findMany({
      orderBy: (p, { desc }) => [desc(p.createdAt)],
      limit: 50,
    });

  const products = await cached("products:all", loadProducts, {
    ttl: 300,
    tags: ["products"],
  });

  return (
    <div className="grid grid-cols-3 gap-6">
      {products.map((item) => (
        <div key={item.id} className="border rounded-lg p-4">
          <h3 className="font-medium">{item.name}</h3>
          <p className="text-muted-foreground">${item.price}</p>
        </div>
      ))}
    </div>
  );
}
Stale-While-Revalidate Pattern
// lib/swr-cache.ts
import { Redis } from "@upstash/redis";
// Shared Upstash Redis client, configured from environment variables.
const redis = Redis.fromEnv();
/** Timing knobs for the stale-while-revalidate helper below. */
interface SWRCacheOptions {
  staleTime: number; // seconds until stale — younger entries are served as-is
  maxAge: number; // seconds until completely expired — older entries force a sync fetch
}
/**
 * Stale-while-revalidate cache wrapper.
 *
 * Three outcomes based on the entry's age:
 * - fresh  (age < staleTime): serve straight from cache;
 * - stale  (staleTime <= age < maxAge): serve cached data immediately and
 *   refresh it in the background;
 * - expired or missing: block on a fresh fetch.
 */
export async function swrCached<T>(
  key: string,
  fetcher: () => Promise<T>,
  options: SWRCacheOptions
): Promise<T> {
  const entry = await redis.get<{ data: T; timestamp: number }>(key);

  if (entry) {
    const ageSeconds = (Date.now() - entry.timestamp) / 1000;

    // Fresh — return immediately.
    if (ageSeconds < options.staleTime) {
      return entry.data;
    }

    // Stale but still usable — hand back cached data, refresh async.
    if (ageSeconds < options.maxAge) {
      revalidateInBackground(key, fetcher, options.maxAge);
      return entry.data;
    }
  }

  // Expired or missing — fetch synchronously.
  return fetchAndCache(key, fetcher, options.maxAge);
}
/**
 * Run `fetcher` and store a `{ data, timestamp }` envelope under `key`
 * with a TTL of `maxAge` seconds.
 *
 * The envelope is passed to redis.set() as-is: @upstash/redis
 * JSON-serializes values itself. The original pre-stringified it, which
 * double-encodes — redis.get() in swrCached() then yields a string whose
 * `.timestamp` is undefined, the computed age is NaN, and every call
 * silently degrades to a synchronous fetch.
 */
async function fetchAndCache<T>(
  key: string,
  fetcher: () => Promise<T>,
  maxAge: number
): Promise<T> {
  const data = await fetcher();
  await redis.set(key, { data, timestamp: Date.now() }, { ex: maxAge });
  return data;
}
// Keys with a background refresh already in flight. Without this guard,
// every request arriving while an entry is stale launches its own fetch —
// a revalidation stampede against the upstream data source.
const revalidating = new Set<string>();

/**
 * Fire-and-forget refresh of `key`. At most one refresh per key runs at a
 * time; errors are logged and never surface to the caller.
 */
function revalidateInBackground<T>(
  key: string,
  fetcher: () => Promise<T>,
  maxAge: number
): void {
  if (revalidating.has(key)) {
    return; // a refresh for this key is already running
  }
  revalidating.add(key);
  fetchAndCache(key, fetcher, maxAge)
    .catch(console.error)
    .finally(() => revalidating.delete(key));
}
Cache Warming
// scripts/warm-cache.ts
import { cached } from "@/lib/cache";
import { db } from "@/db";
/**
 * Pre-populates the Redis cache with every product and the latest 100
 * blog posts so first visitors hit warm entries.
 *
 * Entries within each group are warmed concurrently via Promise.all —
 * the original awaited one cached() call per row, making the script
 * needlessly sequential.
 */
async function warmCache(): Promise<void> {
  console.log("Warming cache...");

  // Warm product pages
  const products = await db.query.products.findMany();
  await Promise.all(
    products.map((product) =>
      cached(`product:${product.slug}`, async () => product, {
        ttl: 3600,
        tags: ["products"],
      })
    )
  );

  // Warm blog posts
  const posts = await db.query.posts.findMany({ limit: 100 });
  await Promise.all(
    posts.map((post) =>
      cached(`post:${post.slug}`, async () => post, {
        ttl: 3600,
        tags: ["posts"],
      })
    )
  );

  console.log(`Warmed ${products.length} products and ${posts.length} posts`);
}

warmCache().catch(console.error);
Caching Decision Matrix
| Data Type | Strategy | TTL | Invalidation |
|---|---|---|---|
| Static pages | Full Route Cache | Build time | Redeploy |
| Blog posts | ISR | 5-60 min | On-demand |
| Product listings | Redis + ISR | 5 min | Tag-based |
| User-specific data | No cache | - | - |
| API responses | Fetch cache | 1-5 min | Tag-based |
| Search results | Redis SWR | 2 min stale, 10 min max | TTL |
Need Performance Optimization?
We optimize caching strategies that reduce load times and server costs. Contact us to discuss your performance goals.