diff --git a/package-lock.json b/package-lock.json index 4ce4d21..f0ed62c 100644 --- a/package-lock.json +++ b/package-lock.json @@ -58,7 +58,7 @@ "typescript": "5.4.5" }, "peerDependencies": { - "next": "^14.1.0" + "next": "^14.1.0 || ^15.0.0" } }, "node_modules/@ampproject/remapping": { diff --git a/package.json b/package.json index e1f67d8..35194c5 100644 --- a/package.json +++ b/package.json @@ -6,7 +6,7 @@ "bin": { "@dbbs/next-serverless-deployment": "./dist/commands/index.js" }, - "types": "dist/types/index.d.ts", + "types": "dist/index.d.ts", "files": [ "dist", "README.md" @@ -81,7 +81,7 @@ "yargs": "17.7.2" }, "peerDependencies": { - "next": "^14.1.0" + "next": "^14.1.0 || ^15.0.0" }, "lint-staged": { "src/**/*.{js,jsx,ts,tsx}": [ diff --git a/src/build/cache/revalidateServer.ts b/src/build/cache/revalidateServer.ts index e2c5993..728a68c 100644 --- a/src/build/cache/revalidateServer.ts +++ b/src/build/cache/revalidateServer.ts @@ -1,67 +1,13 @@ import express from 'express' import { json } from 'body-parser' -import { S3 } from '@aws-sdk/client-s3' -import { DynamoDB, type AttributeValue } from '@aws-sdk/client-dynamodb' import http from 'http' -import { chunkArray } from '../../common/array' const port = parseInt(process.env.PORT || '', 10) || 3000 const nextServerPort = 3001 const nextServerHostname = process.env.HOSTNAME || '0.0.0.0' -const PAGE_CACHE_EXTENSIONS = ['json', 'html', 'rsc'] -const CHUNK_LIMIT = 1000 -const DYNAMODB_BATCH_LIMIT = 25 - interface RevalidateBody { paths: string[] - cacheSegment?: string -} - -const s3 = new S3({ region: process.env.AWS_REGION }) -const dynamoDB = new DynamoDB({ region: process.env.AWS_REGION }) - -async function deleteS3Objects(bucketName: string, keys: string[]) { - if (!keys.length) return - - // Delete objects in chunks to stay within AWS limits - await Promise.allSettled( - chunkArray(keys, CHUNK_LIMIT).map((chunk) => { - return s3.deleteObjects({ - Bucket: bucketName, - Delete: { Objects: chunk.map((Key) 
=> ({ Key })) } - }) - }) - ) -} - -async function batchDeleteFromDynamoDB(tableName: string, items: Record[]) { - if (!items.length) return - - // Split items into chunks of 25 (DynamoDB batch limit) - const chunks = chunkArray(items, DYNAMODB_BATCH_LIMIT) - - await Promise.all( - chunks.map(async (chunk) => { - const deleteRequests = chunk.map((item) => ({ - DeleteRequest: { - Key: item - } - })) - - try { - await dynamoDB.batchWriteItem({ - RequestItems: { - [tableName]: deleteRequests - } - }) - } catch (error) { - console.error('Error in batch delete:', error) - // Handle unprocessed items if needed - throw error - } - }) - ) } const app = express() @@ -70,52 +16,26 @@ app.use(json()) app.post('/api/revalidate-pages', async (req, res) => { try { - const { paths, cacheSegment } = req.body as RevalidateBody + const { paths } = req.body as RevalidateBody if (!paths.length) { res.status(400).json({ Message: 'paths is required.' }).end() - } else { - const attributeValues: Record = {} - const keyConditionExpression = - paths.length === 1 ? 'pageKey = :path0' : 'pageKey IN (' + paths.map((_, i) => `:path${i}`).join(',') + ')' - - paths.forEach((path, index) => { - attributeValues[`:path${index}`] = { S: path.substring(1) } - }) - - if (cacheSegment) { - attributeValues[':segment'] = { S: cacheSegment } - } - - const result = await dynamoDB.query({ - TableName: process.env.DYNAMODB_CACHE_TABLE!, - IndexName: 'cacheKey-index', - KeyConditionExpression: keyConditionExpression, - FilterExpression: cacheSegment ? 
'cacheKey = :segment' : undefined, - ExpressionAttributeValues: attributeValues - }) + return + } - if (result?.Items?.length) { - const s3KeysToDelete = result.Items.flatMap((item) => { - return PAGE_CACHE_EXTENSIONS.map((ext) => `${item.s3Key.S}.${ext}`) + await Promise.all( + paths.map((path) => + http.get({ + hostname: nextServerHostname, + port: nextServerPort, + path }) - await deleteS3Objects(process.env.STATIC_BUCKET_NAME!, s3KeysToDelete) - await batchDeleteFromDynamoDB(process.env.DYNAMODB_CACHE_TABLE!, result.Items) - } - - await Promise.all( - paths.map((path) => - http.get({ - hostname: nextServerHostname, - port: nextServerPort, - path - }) - ) ) - } + ) res.status(200).json({ Message: 'Revalidated.' }) } catch (err) { + console.error('Failed to revalidate:', err) res.status(400).json({ Message: err }) } }) diff --git a/src/cacheHandler/strategy/s3.spec.ts b/src/cacheHandler/strategy/s3.spec.ts index 82ff6a3..d84e1cd 100644 --- a/src/cacheHandler/strategy/s3.spec.ts +++ b/src/cacheHandler/strategy/s3.spec.ts @@ -1,5 +1,5 @@ import { CacheEntry, CacheContext } from '@dbbs/next-cache-handler-core' -import { S3Cache } from './s3' +import { S3Cache, TAG_PREFIX } from './s3' const mockHtmlPage = '

My Page

' @@ -22,6 +22,7 @@ const mockCacheContext: CacheContext = { const mockBucketName = 'test-bucket' const cacheKey = 'test' +const pageKey = 'index' const s3Cache = new S3Cache(mockBucketName) const store = new Map() @@ -61,91 +62,108 @@ jest.mock('@aws-sdk/client-s3', () => { } }) +const mockDynamoQuery = jest.fn() +const mockDynamoPutItem = jest.fn() +jest.mock('@aws-sdk/client-dynamodb', () => { + return { + DynamoDB: jest.fn().mockReturnValue({ + query: jest.fn((...params) => mockDynamoQuery(...params)), + putItem: jest.fn((...params) => mockDynamoPutItem(...params)) + }) + } +}) + describe('S3Cache', () => { afterEach(() => { jest.clearAllMocks() + store.clear() }) afterAll(() => { jest.restoreAllMocks() }) - it('should set and read the cache for page router', async () => { + it('get should return null', async () => { + const result = await s3Cache.get() + expect(result).toBeNull() + }) + + it('should set cache for page router', async () => { await s3Cache.set(cacheKey, cacheKey, mockCacheEntry, mockCacheContext) expect(s3Cache.client.putObject).toHaveBeenCalledTimes(2) expect(s3Cache.client.putObject).toHaveBeenNthCalledWith(1, { Bucket: mockBucketName, Key: `${cacheKey}/${cacheKey}.html`, Body: mockHtmlPage, - ContentType: 'text/html' + ContentType: 'text/html', + Metadata: { + 'Cache-Fragment-Key': cacheKey + } }) expect(s3Cache.client.putObject).toHaveBeenNthCalledWith(2, { Bucket: mockBucketName, Key: `${cacheKey}/${cacheKey}.json`, - Body: JSON.stringify(mockCacheEntry.value.pageData), - ContentType: 'application/json' + Body: JSON.stringify(mockCacheEntry), + ContentType: 'application/json', + Metadata: { + 'Cache-Fragment-Key': cacheKey + } }) - - const result = await s3Cache.get(cacheKey, cacheKey) - expect(result).toEqual(mockCacheEntry.value.pageData) - expect(s3Cache.client.getObject).toHaveBeenCalledTimes(1) - expect(s3Cache.client.getObject).toHaveBeenCalledWith({ - Bucket: mockBucketName, - Key: `${cacheKey}/${cacheKey}.json` + 
expect(mockDynamoPutItem).toHaveBeenCalledWith({ + TableName: process.env.DYNAMODB_CACHE_TABLE, + Item: { + pageKey: { S: cacheKey }, + cacheKey: { S: cacheKey }, + s3Key: { S: `${cacheKey}/${cacheKey}` }, + tags: { S: '' }, + createdAt: { S: expect.any(String) } + } }) }) - it('should set and read the cache for app router', async () => { + it('should set cache for app router', async () => { await s3Cache.set(cacheKey, cacheKey, mockCacheEntry, { ...mockCacheContext, isAppRouter: true }) - expect(s3Cache.client.putObject).toHaveBeenCalledTimes(2) + expect(s3Cache.client.putObject).toHaveBeenCalledTimes(3) expect(s3Cache.client.putObject).toHaveBeenNthCalledWith(1, { Bucket: mockBucketName, Key: `${cacheKey}/${cacheKey}.html`, Body: mockHtmlPage, - ContentType: 'text/html' + ContentType: 'text/html', + Metadata: { + 'Cache-Fragment-Key': cacheKey + } }) expect(s3Cache.client.putObject).toHaveBeenNthCalledWith(2, { + Bucket: mockBucketName, + Key: `${cacheKey}/${cacheKey}.json`, + Body: JSON.stringify(mockCacheEntry), + ContentType: 'application/json', + Metadata: { + 'Cache-Fragment-Key': cacheKey + } + }) + expect(s3Cache.client.putObject).toHaveBeenNthCalledWith(3, { Bucket: mockBucketName, Key: `${cacheKey}/${cacheKey}.rsc`, Body: mockCacheEntry.value.pageData, - ContentType: 'text/x-component' + ContentType: 'text/x-component', + Metadata: { + 'Cache-Fragment-Key': cacheKey + } }) - - const result = await s3Cache.get(cacheKey, cacheKey) - expect(result).toEqual(mockCacheEntry.value.pageData) - expect(s3Cache.client.getObject).toHaveBeenCalledTimes(1) - expect(s3Cache.client.getObject).toHaveBeenCalledWith({ - Bucket: mockBucketName, - Key: `${cacheKey}/${cacheKey}.json` + expect(mockDynamoPutItem).toHaveBeenCalledWith({ + TableName: process.env.DYNAMODB_CACHE_TABLE, + Item: { + pageKey: { S: cacheKey }, + cacheKey: { S: cacheKey }, + s3Key: { S: `${cacheKey}/${cacheKey}` }, + tags: { S: '' }, + createdAt: { S: expect.any(String) } + } }) }) it('should delete 
cache value', async () => { - await s3Cache.set(cacheKey, cacheKey, mockCacheEntry, mockCacheContext) - expect(s3Cache.client.putObject).toHaveBeenCalledTimes(2) - expect(s3Cache.client.putObject).toHaveBeenNthCalledWith(1, { - Bucket: mockBucketName, - Key: `${cacheKey}/${cacheKey}.html`, - Body: mockHtmlPage, - ContentType: 'text/html' - }) - expect(s3Cache.client.putObject).toHaveBeenNthCalledWith(2, { - Bucket: mockBucketName, - Key: `${cacheKey}/${cacheKey}.json`, - Body: JSON.stringify(mockCacheEntry.value.pageData), - ContentType: 'application/json' - }) - - const result = await s3Cache.get(cacheKey, cacheKey) - expect(result).toEqual(mockCacheEntry.value.pageData) - expect(s3Cache.client.getObject).toHaveBeenCalledTimes(1) - expect(s3Cache.client.getObject).toHaveBeenCalledWith({ - Bucket: mockBucketName, - Key: `${cacheKey}/${cacheKey}.json` - }) - await s3Cache.delete(cacheKey, cacheKey) - const updatedResult = await s3Cache.get(cacheKey, cacheKey) - expect(updatedResult).toBeNull() expect(s3Cache.client.deleteObjects).toHaveBeenCalledTimes(1) expect(s3Cache.client.deleteObjects).toHaveBeenNthCalledWith(1, { Bucket: mockBucketName, @@ -159,23 +177,64 @@ describe('S3Cache', () => { }) }) - it('should revalidate cache by tag', async () => { - const mockCacheEntryWithTags = { ...mockCacheEntry, tags: [cacheKey] } - await s3Cache.set(cacheKey, cacheKey, mockCacheEntryWithTags, mockCacheContext) + it('should revalidate cache by tag and delete objects', async () => { + const s3Path = `${pageKey}/${cacheKey}` + const mockQueryResult = { + Items: [ + { + pageKey: { S: pageKey }, + cacheKey: { S: cacheKey } + } + ] + } + + mockDynamoQuery.mockResolvedValueOnce(mockQueryResult) + mockGetObjectTagging.mockResolvedValue({ TagSet: [{ Key: TAG_PREFIX, Value: 'test-tag' }] }) + mockGetObjectList.mockResolvedValueOnce({ + Contents: [{ Key: s3Path + '.json' }, { Key: s3Path + '.html' }, { Key: s3Path + '.rsc' }] + }) - expect(await s3Cache.get(cacheKey, 
cacheKey)).toEqual(mockCacheEntryWithTags.value.pageData) + await s3Cache.revalidateTag('test-tag') - await s3Cache.revalidateTag(cacheKey) + expect(mockDynamoQuery).toHaveBeenCalledWith({ + TableName: process.env.DYNAMODB_CACHE_TABLE, + KeyConditionExpression: '#field = :value', + ExpressionAttributeNames: { + '#field': 'tags' + }, + ExpressionAttributeValues: { + ':value': { S: 'test-tag' } + } + }) - expect(await s3Cache.get(cacheKey, cacheKey)).toBeNull() + expect(s3Cache.client.deleteObjects).toHaveBeenCalledWith({ + Bucket: mockBucketName, + Delete: { + Objects: [{ Key: s3Path + '.json' }, { Key: s3Path + '.html' }, { Key: s3Path + '.rsc' }] + } + }) }) it('should revalidate cache by path', async () => { - await s3Cache.set(cacheKey, cacheKey, mockCacheEntry, mockCacheContext) - - expect(await s3Cache.get(cacheKey, cacheKey)).toEqual(mockCacheEntry.value.pageData) + const s3Path = `${pageKey}/${cacheKey}` + mockGetObjectList.mockResolvedValueOnce({ + Contents: [{ Key: s3Path + '.json' }, { Key: s3Path + '.html' }, { Key: s3Path + '.rsc' }] + }) await s3Cache.deleteAllByKeyMatch(cacheKey, '') - expect(await s3Cache.get(cacheKey, cacheKey)).toBeNull() + + expect(s3Cache.client.listObjectsV2).toHaveBeenCalledWith({ + Bucket: mockBucketName, + ContinuationToken: undefined, + Prefix: `${cacheKey}/`, + Delimiter: '/' + }) + + expect(s3Cache.client.deleteObjects).toHaveBeenCalledWith({ + Bucket: mockBucketName, + Delete: { + Objects: [{ Key: s3Path + '.json' }, { Key: s3Path + '.html' }, { Key: s3Path + '.rsc' }] + } + }) }) }) diff --git a/src/cacheHandler/strategy/s3.ts b/src/cacheHandler/strategy/s3.ts index 86d5fec..3442380 100644 --- a/src/cacheHandler/strategy/s3.ts +++ b/src/cacheHandler/strategy/s3.ts @@ -4,8 +4,7 @@ import { DynamoDB } from '@aws-sdk/client-dynamodb' import { chunkArray } from '../../common/array' import type { CacheEntry, CacheStrategy, CacheContext } from '@dbbs/next-cache-handler-core' -const TAG_PREFIX = 'revalidateTag' -const 
NOT_FOUND_ERROR = ['NotFound', 'NoSuchKey'] +export const TAG_PREFIX = 'revalidateTag' enum CacheExtension { JSON = 'json', HTML = 'html', @@ -42,24 +41,13 @@ export class S3Cache implements CacheStrategy { ) } - async get(pageKey: string, cacheKey: string): Promise { - if (!this.client) return null - - const pageData = await this.client - .getObject({ - Bucket: this.bucketName, - Key: `${pageKey}/${cacheKey}.${CacheExtension.JSON}` - }) - .catch((error) => { - if (NOT_FOUND_ERROR.includes(error.name)) return null - throw error - }) - - if (!pageData?.Body) return null - - const response = await pageData.Body.transformToString('utf-8') - - return JSON.parse(response) + async get(): Promise { + // We always need to return null to make nextjs revalidate the page and create new file in s3 + // caching retrieving logic is handled by CloudFront and origin response lambda + // we can't use nextjs cache retrieval since it is required to re-render page during validation + // but nextjs built in `revalidate` only clears cache, but does not re-render the page + // so we need to have custom handler to revalidate and re-render the page + return null } async set(pageKey: string, cacheKey: string, data: CacheEntry, ctx: CacheContext): Promise { diff --git a/src/cdk/constructs/CloudFrontDistribution.ts b/src/cdk/constructs/CloudFrontDistribution.ts index cf74f02..1592f49 100644 --- a/src/cdk/constructs/CloudFrontDistribution.ts +++ b/src/cdk/constructs/CloudFrontDistribution.ts @@ -11,7 +11,6 @@ interface CloudFrontPropsDistribution { staticBucket: s3.IBucket renderServerDomain: string requestEdgeFunction: cloudfront.experimental.EdgeFunction - responseEdgeFunction: cloudfront.experimental.EdgeFunction viewerResponseEdgeFunction: cloudfront.experimental.EdgeFunction viewerRequestLambdaEdge: cloudfront.experimental.EdgeFunction cacheConfig: CacheConfig @@ -34,7 +33,6 @@ export class CloudFrontDistribution extends Construct { const { staticBucket, requestEdgeFunction,
responseEdgeFunction, viewerResponseEdgeFunction, viewerRequestLambdaEdge, cacheConfig, @@ -94,10 +92,6 @@ export class CloudFrontDistribution extends Construct { functionVersion: requestEdgeFunction.currentVersion, eventType: cloudfront.LambdaEdgeEventType.ORIGIN_REQUEST }, - { - functionVersion: responseEdgeFunction.currentVersion, - eventType: cloudfront.LambdaEdgeEventType.ORIGIN_RESPONSE - }, { functionVersion: viewerResponseEdgeFunction.currentVersion, eventType: cloudfront.LambdaEdgeEventType.VIEWER_RESPONSE diff --git a/src/cdk/constructs/DynamoDBDistribution.ts b/src/cdk/constructs/DynamoDBDistribution.ts index cf42cf2..37b6bbb 100644 --- a/src/cdk/constructs/DynamoDBDistribution.ts +++ b/src/cdk/constructs/DynamoDBDistribution.ts @@ -1,6 +1,7 @@ import { Construct } from 'constructs' import * as dynamodb from 'aws-cdk-lib/aws-dynamodb' import { RemovalPolicy } from 'aws-cdk-lib' +import { addOutput } from '../../common/cdk' interface DynamoDBDistributionProps { stage: string @@ -23,22 +24,24 @@ export class DynamoDBDistribution extends Construct { type: dynamodb.AttributeType.STRING }, sortKey: { - name: 'tags', + name: 'cacheKey', type: dynamodb.AttributeType.STRING }, removalPolicy: props.isProduction ? 
RemovalPolicy.RETAIN : RemovalPolicy.DESTROY }) this.table.addGlobalSecondaryIndex({ - indexName: 'cacheKey-index', + indexName: 'pageKey-tags-index', partitionKey: { name: 'pageKey', type: dynamodb.AttributeType.STRING }, sortKey: { - name: 'cacheKey', + name: 'tags', type: dynamodb.AttributeType.STRING } }) + + addOutput(this, `${appName}-DynamoDBCacheTableName`, this.table.tableName) } } diff --git a/src/cdk/constructs/OriginResponseLambdaEdge.ts b/src/cdk/constructs/OriginResponseLambdaEdge.ts deleted file mode 100644 index 213f25a..0000000 --- a/src/cdk/constructs/OriginResponseLambdaEdge.ts +++ /dev/null @@ -1,65 +0,0 @@ -import { Construct } from 'constructs' -import * as iam from 'aws-cdk-lib/aws-iam' -import * as cdk from 'aws-cdk-lib' -import * as lambda from 'aws-cdk-lib/aws-lambda' -import * as cloudfront from 'aws-cdk-lib/aws-cloudfront' -import * as logs from 'aws-cdk-lib/aws-logs' -import path from 'node:path' -import { buildLambda } from '../../common/esbuild' -import { CacheConfig } from '../../types' - -interface OriginResponseLambdaEdgeProps extends cdk.StackProps { - renderWorkerQueueUrl: string - renderWorkerQueueArn: string - buildOutputPath: string - nodejs?: string - cacheConfig: CacheConfig - region: string -} - -const NodeJSEnvironmentMapping: Record = { - '18': lambda.Runtime.NODEJS_18_X, - '20': lambda.Runtime.NODEJS_20_X -} - -export class OriginResponseLambdaEdge extends Construct { - public readonly lambdaEdge: cloudfront.experimental.EdgeFunction - - constructor(scope: Construct, id: string, props: OriginResponseLambdaEdgeProps) { - const { nodejs, buildOutputPath, cacheConfig, renderWorkerQueueUrl, renderWorkerQueueArn, region } = props - super(scope, id) - - const nodeJSEnvironment = NodeJSEnvironmentMapping[nodejs ?? ''] ?? 
NodeJSEnvironmentMapping['20'] - const name = 'originResponse' - - buildLambda(name, buildOutputPath, { - define: { - 'process.env.RENDER_QUEUE_URL': JSON.stringify(renderWorkerQueueUrl), - 'process.env.CACHE_CONFIG': JSON.stringify(cacheConfig), - 'process.env.QUEUE_REGION': JSON.stringify(region) - } - }) - - const logGroup = new logs.LogGroup(this, 'OriginResponseLambdaEdgeLogGroup', { - logGroupName: `/aws/lambda/${id}-originResponse`, - removalPolicy: cdk.RemovalPolicy.DESTROY, - retention: logs.RetentionDays.ONE_DAY - }) - - this.lambdaEdge = new cloudfront.experimental.EdgeFunction(this, 'OriginResponseLambdaEdge', { - runtime: nodeJSEnvironment, - code: lambda.Code.fromAsset(path.join(buildOutputPath, 'server-functions', name)), - handler: 'index.handler', - logGroup - }) - - this.lambdaEdge.addToRolePolicy( - new iam.PolicyStatement({ - actions: ['sqs:SendMessage'], - resources: [renderWorkerQueueArn] - }) - ) - - logGroup.grantWrite(this.lambdaEdge) - } -} diff --git a/src/cdk/stacks/NextCloudfrontStack.ts b/src/cdk/stacks/NextCloudfrontStack.ts index c401da7..54e233f 100644 --- a/src/cdk/stacks/NextCloudfrontStack.ts +++ b/src/cdk/stacks/NextCloudfrontStack.ts @@ -3,7 +3,6 @@ import { Construct } from 'constructs' import * as s3 from 'aws-cdk-lib/aws-s3' import { OriginRequestLambdaEdge } from '../constructs/OriginRequestLambdaEdge' import { CloudFrontDistribution } from '../constructs/CloudFrontDistribution' -import { OriginResponseLambdaEdge } from '../constructs/OriginResponseLambdaEdge' import { ViewerResponseLambdaEdge } from '../constructs/ViewerResponseLambdaEdge' import { ViewerRequestLambdaEdge } from '../constructs/ViewerRequestLambdaEdge' import { DeployConfig, NextRedirects } from '../../types' @@ -13,8 +12,6 @@ export interface NextCloudfrontStackProps extends StackProps { region: string staticBucketName: string renderServerDomain: string - renderWorkerQueueUrl: string - renderWorkerQueueArn: string buildOutputPath: string deployConfig: 
DeployConfig imageTTL?: number @@ -25,7 +22,6 @@ export interface NextCloudfrontStackProps extends StackProps { export class NextCloudfrontStack extends Stack { public readonly originRequestLambdaEdge: OriginRequestLambdaEdge - public readonly originResponseLambdaEdge: OriginResponseLambdaEdge public readonly viewerResponseLambdaEdge: ViewerResponseLambdaEdge public readonly viewerRequestLambdaEdge: ViewerRequestLambdaEdge public readonly cloudfront: CloudFrontDistribution @@ -37,8 +33,6 @@ export class NextCloudfrontStack extends Stack { buildOutputPath, staticBucketName, renderServerDomain, - renderWorkerQueueUrl, - renderWorkerQueueArn, region, deployConfig, imageTTL, @@ -57,15 +51,6 @@ export class NextCloudfrontStack extends Stack { nextCachedRoutesMatchers }) - this.originResponseLambdaEdge = new OriginResponseLambdaEdge(this, `${id}-OriginResponseLambdaEdge`, { - nodejs, - renderWorkerQueueUrl, - buildOutputPath, - cacheConfig: deployConfig.cache, - renderWorkerQueueArn, - region - }) - this.viewerRequestLambdaEdge = new ViewerRequestLambdaEdge(this, `${id}-ViewerRequestLambdaEdge`, { buildOutputPath, nodejs, @@ -88,7 +73,6 @@ export class NextCloudfrontStack extends Stack { staticBucket, renderServerDomain, requestEdgeFunction: this.originRequestLambdaEdge.lambdaEdge, - responseEdgeFunction: this.originResponseLambdaEdge.lambdaEdge, viewerResponseEdgeFunction: this.viewerResponseLambdaEdge.lambdaEdge, viewerRequestLambdaEdge: this.viewerRequestLambdaEdge.lambdaEdge, cacheConfig: deployConfig.cache, @@ -96,6 +80,5 @@ export class NextCloudfrontStack extends Stack { }) staticBucket.grantRead(this.originRequestLambdaEdge.lambdaEdge) - staticBucket.grantRead(this.originResponseLambdaEdge.lambdaEdge) } } diff --git a/src/commands/deploy.ts b/src/commands/deploy.ts index 46d8170..b4243ba 100644 --- a/src/commands/deploy.ts +++ b/src/commands/deploy.ts @@ -149,8 +149,6 @@ export const deploy = async (config: DeployConfig) => { nodejs: config.nodejs, 
staticBucketName: nextRenderServerStackOutput.StaticBucketName, renderServerDomain: nextRenderServerStackOutput.RenderServerDomain, - renderWorkerQueueUrl: nextRenderServerStackOutput.RenderWorkerQueueUrl, - renderWorkerQueueArn: nextRenderServerStackOutput.RenderWorkerQueueArn, buildOutputPath: outputPath, crossRegionReferences: true, region, diff --git a/src/index.ts b/src/index.ts index a758851..29b5a1f 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1 +1,2 @@ export * from './build/withNextDeploy' +export * from './types' diff --git a/src/lambdas/originRequest.ts b/src/lambdas/originRequest.ts index 067ff0d..be4fbf5 100644 --- a/src/lambdas/originRequest.ts +++ b/src/lambdas/originRequest.ts @@ -60,22 +60,38 @@ function getS3ObjectPath(request: CloudFrontRequest, cacheConfig: CacheConfig) { } } -async function checkFileExistsInS3(s3Bucket: string, s3Key: string): Promise { +async function checkFileExistsInS3( + s3Bucket: string, + s3Key: string +): Promise<{ LastModified: Date | string; CacheControl: string } | null> { try { - await s3.send( + const { LastModified = '', CacheControl = '' } = await s3.send( new HeadObjectCommand({ Bucket: s3Bucket, Key: s3Key }) ) - return true + return { LastModified: LastModified!, CacheControl: CacheControl! } } catch (e) { - if ((e as Error).name?.includes('NotFound')) return false + if ((e as Error).name?.includes('NotFound')) return null throw e } } +const shouldRevalidateFile = (s3FileMeta: { LastModified: Date | string; CacheControl: string } | null) => { + if (!s3FileMeta) return false + + const { LastModified, CacheControl } = s3FileMeta + + const match = CacheControl.match(/max-age=(\d+)/) + const maxAge = match ? 
parseInt(match[1]) : 0 + + const isFileExpired = Date.now() - new Date(LastModified).getTime() > maxAge * 1000 + + return isFileExpired +} + export const handler = async ( event: CloudFrontRequestEvent, _context: Context, @@ -92,9 +108,10 @@ export const handler = async ( try { // Check if file exists in S3 when route accepts caching. - const isFileExists = isCachedRoute ? await checkFileExistsInS3(s3Bucket, s3Key) : false + const s3FileMeta = isCachedRoute ? await checkFileExistsInS3(s3Bucket, s3Key) : false + const shouldRenderFile = !s3FileMeta || shouldRevalidateFile(s3FileMeta) - if (isFileExists) { + if (!shouldRenderFile) { // Modify s3 path request request.uri = `/${s3Key}` diff --git a/src/lambdas/originResponse.ts b/src/lambdas/originResponse.ts deleted file mode 100644 index 642da11..0000000 --- a/src/lambdas/originResponse.ts +++ /dev/null @@ -1,100 +0,0 @@ -import type { CloudFrontRequestCallback, Context, CloudFrontResponseEvent, CloudFrontHeaders } from 'aws-lambda' -import { SQSClient, SendMessageCommand } from '@aws-sdk/client-sqs' - -const sqs = new SQSClient({ region: process.env.QUEUE_REGION! }) - -/** - * Checks if a file is expired based on HTTP headers - * @param headers - Object containing HTTP headers - * @param headers.expires - Expires header value - * @param headers['cache-control'] - Cache-Control header value - * @param headers['last-modified'] - Last-Modified header value - * @returns boolean indicating if the file is expired - */ -function checkFileIsExpired(headers: CloudFrontHeaders): boolean { - const expiresHeader = headers['expires'] ? headers['expires'][0].value : null - const cacheControlHeader = headers['cache-control'] ? headers['cache-control'][0].value : null - const lastModifiedHeader = headers['last-modified'] ? 
headers['last-modified'][0].value : null - const now = Date.now() - - // Check Expires header - if (expiresHeader && new Date(expiresHeader).getTime() <= now) { - return true - } - - // Check Cache-Control: max-age - if (cacheControlHeader && lastModifiedHeader) { - const maxAgeMatch = cacheControlHeader.match(/max-age=(\d+)/) - if (maxAgeMatch) { - const maxAgeSeconds = parseInt(maxAgeMatch[1], 10) - const responseDate = new Date(lastModifiedHeader).getTime() - const expiryTime = responseDate + maxAgeSeconds * 1000 - - return expiryTime <= now - } - } - - return false -} - -/** - * Extracts the page router path from an S3 URI by removing the cache hash - * @param {string} s3Uri - The S3 URI containing the full path (e.g., '/blog/post/abc123') - * @returns {string} The cleaned router path without cache hash. Special case: returns '/' for '/index' - * - * @description - * This function processes S3 URIs by: - * 1. Splitting the path into segments - * 2. Removing the last segment (cache hash) - * 3. Building actual NextJS page path - */ -function getPageRouterPath(s3Uri: string) { - const path = s3Uri.split('/').slice(0, -1).join('/') - return path === '/index' ? 
'/' : path -} - -/** - * Lambda handler for checking file expiration in CloudFront responses - * @param event - CloudFront response event - * @param _context - Lambda context - * @param callback - CloudFront request callback - * @returns Promise - * - * @throws Will throw an error if SQS message sending fails - */ -export const handler = async ( - event: CloudFrontResponseEvent, - _context: Context, - callback: CloudFrontRequestCallback -): Promise => { - const response = event.Records[0].cf.response - const request = event.Records[0].cf.request - const headers = response.headers - - try { - // Check if file is expired using Expires or Cache-Control headers - if (checkFileIsExpired(headers)) { - headers['cache-control'] = [{ key: 'Cache-Control', value: 'no-cache' }] - - // Send message to SQS for page revalidation. - await sqs.send( - new SendMessageCommand({ - QueueUrl: process.env.RENDER_QUEUE_URL!, - MessageBody: JSON.stringify({ - paths: [getPageRouterPath(request.uri)] - }), - MessageGroupId: request.uri, - MessageDeduplicationId: Date.now().toString() - }) - ) - } - - callback(null, response) - } catch (error) { - callback(null, { - status: '500', - statusDescription: 'Internal Server Error', - body: `Error: ${(error as Error).message}` - }) - } -}