feat(backend): update collections, config and migration tools

Update Payload CMS configuration, collections (Audit, Posts), and add migration scripts/reports.
This commit is contained in:
2026-02-11 11:50:23 +08:00
parent 8ca609a889
commit be7fc902fb
46 changed files with 5442 additions and 15 deletions

View File

@@ -0,0 +1,212 @@
{
"timestamp": "2026-02-01T07:36:51.596Z",
"dryRun": false,
"summary": {
"total": 37,
"created": 0,
"skipped": 0,
"failed": 37
},
"byCollection": {
"posts": {
"created": 0,
"skipped": 0,
"failed": 37
}
},
"details": {
"posts": {
"collection": "posts",
"created": 0,
"skipped": 0,
"failed": 37,
"results": [
{
"slug": "2-zhao-yao-kong-xiao-fei-zhe-de-xin",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "2022-jie-qing-xing-xiao-quan-gong-lue",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "2022zuixin-google",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "2024googleshang",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "2025huan",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "2025xingxiaozhishi",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "bu-cang-si-da-gong-kai",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "bu-guo",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "da-sheng-ji",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "en-qun-shu-wei-x-google-xiao-xue-tang",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "en-qun-shu-wei-x-google-xiao-xue-tang-5",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "en-qun-shu-wei-x-lian-shu-xiao-xue-tang",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "en-qun-shu-wei-x-metaverse",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "en-qun-shu-wei-x-metaverse-bai-hua-wen",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "facebook-mjing-yin",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "facebookfen",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "faceookshequz",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "fu-ping-ye-shi-yi-zhong-shang-ji",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "genzhu",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "google-comment-delete",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "googlecomment",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "googlemybusiness-optimization",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "googleping",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "hu-nian-ji-xiang-hua-5",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "issue-with-gmb-verification",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "ni-de-huang-jin-di-duan-zu-chuan-mi-fang-jiu-bu-xu-yao-wang-lu-xing-xiao-liao-ma-a",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "ni-xin-ma-ni-xian-zai-kan-de-zhe-pian-wen-jia-zhi-2500-wan-tai-bi-5",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "optimize-gmb-for-local-seo",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "paiming",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "shang-jia-guan-jian-zi",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "shang-jia-jing-ying-mi-ji",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "shequnxingxiao",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "shitidianjia",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "storytelling",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "xi-jing-biao-ti-de-5-ge-jue-qiao-7",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "xiugai-google",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
},
{
"slug": "zheng-que-de-hashtag-dai-ni-shang-tian-tang",
"success": false,
"error": "ValidationError: The following field is invalid: Content > Content"
}
]
}
}
}

View File

@@ -0,0 +1,69 @@
# Migration Report
**Generated:** 2026/2/1 下午3:36:51
**Mode:** ✅ Live Migration
---
## Summary
| Metric | Count |
|--------|-------|
| Total Items | 37 |
| ✅ Created | 0 |
| ⏭️ Skipped | 0 |
| ❌ Failed | 37 |
## By Collection
### Posts
| Metric | Count |
|--------|-------|
| Created | 0 |
| Skipped | 0 |
| Failed | 37 |
## Details
### Posts
#### ❌ Failed (37)
- `2-zhao-yao-kong-xiao-fei-zhe-de-xin`: ValidationError: The following field is invalid: Content > Content
- `2022-jie-qing-xing-xiao-quan-gong-lue`: ValidationError: The following field is invalid: Content > Content
- `2022zuixin-google`: ValidationError: The following field is invalid: Content > Content
- `2024googleshang`: ValidationError: The following field is invalid: Content > Content
- `2025huan`: ValidationError: The following field is invalid: Content > Content
- `2025xingxiaozhishi`: ValidationError: The following field is invalid: Content > Content
- `bu-cang-si-da-gong-kai`: ValidationError: The following field is invalid: Content > Content
- `bu-guo`: ValidationError: The following field is invalid: Content > Content
- `da-sheng-ji`: ValidationError: The following field is invalid: Content > Content
- `en-qun-shu-wei-x-google-xiao-xue-tang`: ValidationError: The following field is invalid: Content > Content
- `en-qun-shu-wei-x-google-xiao-xue-tang-5`: ValidationError: The following field is invalid: Content > Content
- `en-qun-shu-wei-x-lian-shu-xiao-xue-tang`: ValidationError: The following field is invalid: Content > Content
- `en-qun-shu-wei-x-metaverse`: ValidationError: The following field is invalid: Content > Content
- `en-qun-shu-wei-x-metaverse-bai-hua-wen`: ValidationError: The following field is invalid: Content > Content
- `facebook-mjing-yin`: ValidationError: The following field is invalid: Content > Content
- `facebookfen`: ValidationError: The following field is invalid: Content > Content
- `faceookshequz`: ValidationError: The following field is invalid: Content > Content
- `fu-ping-ye-shi-yi-zhong-shang-ji`: ValidationError: The following field is invalid: Content > Content
- `genzhu`: ValidationError: The following field is invalid: Content > Content
- `google-comment-delete`: ValidationError: The following field is invalid: Content > Content
- `googlecomment`: ValidationError: The following field is invalid: Content > Content
- `googlemybusiness-optimization`: ValidationError: The following field is invalid: Content > Content
- `googleping`: ValidationError: The following field is invalid: Content > Content
- `hu-nian-ji-xiang-hua-5`: ValidationError: The following field is invalid: Content > Content
- `issue-with-gmb-verification`: ValidationError: The following field is invalid: Content > Content
- `ni-de-huang-jin-di-duan-zu-chuan-mi-fang-jiu-bu-xu-yao-wang-lu-xing-xiao-liao-ma-a`: ValidationError: The following field is invalid: Content > Content
- `ni-xin-ma-ni-xian-zai-kan-de-zhe-pian-wen-jia-zhi-2500-wan-tai-bi-5`: ValidationError: The following field is invalid: Content > Content
- `optimize-gmb-for-local-seo`: ValidationError: The following field is invalid: Content > Content
- `paiming`: ValidationError: The following field is invalid: Content > Content
- `shang-jia-guan-jian-zi`: ValidationError: The following field is invalid: Content > Content
- `shang-jia-jing-ying-mi-ji`: ValidationError: The following field is invalid: Content > Content
- `shequnxingxiao`: ValidationError: The following field is invalid: Content > Content
- `shitidianjia`: ValidationError: The following field is invalid: Content > Content
- `storytelling`: ValidationError: The following field is invalid: Content > Content
- `xi-jing-biao-ti-de-5-ge-jue-qiao-7`: ValidationError: The following field is invalid: Content > Content
- `xiugai-google`: ValidationError: The following field is invalid: Content > Content
- `zheng-que-de-hashtag-dai-ni-shang-tian-tang`: ValidationError: The following field is invalid: Content > Content

View File

@@ -0,0 +1,212 @@
{
"timestamp": "2026-02-05T02:55:43.614Z",
"dryRun": false,
"summary": {
"total": 37,
"created": 37,
"skipped": 0,
"failed": 0
},
"byCollection": {
"posts": {
"created": 37,
"skipped": 0,
"failed": 0
}
},
"details": {
"posts": {
"collection": "posts",
"created": 37,
"skipped": 0,
"failed": 0,
"results": [
{
"slug": "2-zhao-yao-kong-xiao-fei-zhe-de-xin",
"success": true,
"id": "698406d6b591b1d027f4ebce"
},
{
"slug": "2022-jie-qing-xing-xiao-quan-gong-lue",
"success": true,
"id": "698406d6b591b1d027f4ebd4"
},
{
"slug": "2022zuixin-google",
"success": true,
"id": "698406d7b591b1d027f4ebda"
},
{
"slug": "2024googleshang",
"success": true,
"id": "698406d7b591b1d027f4ebe0"
},
{
"slug": "2025huan",
"success": true,
"id": "698406d7b591b1d027f4ebe6"
},
{
"slug": "2025xingxiaozhishi",
"success": true,
"id": "698406d8b591b1d027f4ebec"
},
{
"slug": "bu-cang-si-da-gong-kai",
"success": true,
"id": "698406d8b591b1d027f4ebf2"
},
{
"slug": "bu-guo",
"success": true,
"id": "698406d8b591b1d027f4ebf8"
},
{
"slug": "da-sheng-ji",
"success": true,
"id": "698406d9b591b1d027f4ebfe"
},
{
"slug": "en-qun-shu-wei-x-google-xiao-xue-tang",
"success": true,
"id": "698406d9b591b1d027f4ec04"
},
{
"slug": "en-qun-shu-wei-x-google-xiao-xue-tang-5",
"success": true,
"id": "698406d9b591b1d027f4ec0a"
},
{
"slug": "en-qun-shu-wei-x-lian-shu-xiao-xue-tang",
"success": true,
"id": "698406d9b591b1d027f4ec10"
},
{
"slug": "en-qun-shu-wei-x-metaverse",
"success": true,
"id": "698406dab591b1d027f4ec16"
},
{
"slug": "en-qun-shu-wei-x-metaverse-bai-hua-wen",
"success": true,
"id": "698406dab591b1d027f4ec1c"
},
{
"slug": "facebook-mjing-yin",
"success": true,
"id": "698406dab591b1d027f4ec22"
},
{
"slug": "facebookfen",
"success": true,
"id": "698406dbb591b1d027f4ec28"
},
{
"slug": "faceookshequz",
"success": true,
"id": "698406dbb591b1d027f4ec2e"
},
{
"slug": "fu-ping-ye-shi-yi-zhong-shang-ji",
"success": true,
"id": "698406dbb591b1d027f4ec34"
},
{
"slug": "genzhu",
"success": true,
"id": "698406dcb591b1d027f4ec3a"
},
{
"slug": "google-comment-delete",
"success": true,
"id": "698406dcb591b1d027f4ec40"
},
{
"slug": "googlecomment",
"success": true,
"id": "698406dcb591b1d027f4ec46"
},
{
"slug": "googlemybusiness-optimization",
"success": true,
"id": "698406ddb591b1d027f4ec4c"
},
{
"slug": "googleping",
"success": true,
"id": "698406ddb591b1d027f4ec52"
},
{
"slug": "hu-nian-ji-xiang-hua-5",
"success": true,
"id": "698406ddb591b1d027f4ec58"
},
{
"slug": "issue-with-gmb-verification",
"success": true,
"id": "698406ddb591b1d027f4ec5e"
},
{
"slug": "ni-de-huang-jin-di-duan-zu-chuan-mi-fang-jiu-bu-xu-yao-wang-lu-xing-xiao-liao-ma-a",
"success": true,
"id": "698406deb591b1d027f4ec64"
},
{
"slug": "ni-xin-ma-ni-xian-zai-kan-de-zhe-pian-wen-jia-zhi-2500-wan-tai-bi-5",
"success": true,
"id": "698406deb591b1d027f4ec6a"
},
{
"slug": "optimize-gmb-for-local-seo",
"success": true,
"id": "698406deb591b1d027f4ec70"
},
{
"slug": "paiming",
"success": true,
"id": "698406dfb591b1d027f4ec76"
},
{
"slug": "shang-jia-guan-jian-zi",
"success": true,
"id": "698406dfb591b1d027f4ec7c"
},
{
"slug": "shang-jia-jing-ying-mi-ji",
"success": true,
"id": "698406dfb591b1d027f4ec82"
},
{
"slug": "shequnxingxiao",
"success": true,
"id": "698406e0b591b1d027f4ec88"
},
{
"slug": "shitidianjia",
"success": true,
"id": "698406e0b591b1d027f4ec8e"
},
{
"slug": "storytelling",
"success": true,
"id": "698406e0b591b1d027f4ec94"
},
{
"slug": "xi-jing-biao-ti-de-5-ge-jue-qiao-7",
"success": true,
"id": "698406e0b591b1d027f4ec9a"
},
{
"slug": "xiugai-google",
"success": true,
"id": "698406e1b591b1d027f4eca0"
},
{
"slug": "zheng-que-de-hashtag-dai-ni-shang-tian-tang",
"success": true,
"id": "698406e1b591b1d027f4eca6"
}
]
}
}
}

View File

@@ -0,0 +1,69 @@
# Migration Report
**Generated:** 2026/2/5 上午10:55:43
**Mode:** ✅ Live Migration
---
## Summary
| Metric | Count |
|--------|-------|
| Total Items | 37 |
| ✅ Created | 37 |
| ⏭️ Skipped | 0 |
| ❌ Failed | 0 |
## By Collection
### Posts
| Metric | Count |
|--------|-------|
| Created | 37 |
| Skipped | 0 |
| Failed | 0 |
## Details
### Posts
#### ✅ Created (37)
- `2-zhao-yao-kong-xiao-fei-zhe-de-xin` (ID: 698406d6b591b1d027f4ebce)
- `2022-jie-qing-xing-xiao-quan-gong-lue` (ID: 698406d6b591b1d027f4ebd4)
- `2022zuixin-google` (ID: 698406d7b591b1d027f4ebda)
- `2024googleshang` (ID: 698406d7b591b1d027f4ebe0)
- `2025huan` (ID: 698406d7b591b1d027f4ebe6)
- `2025xingxiaozhishi` (ID: 698406d8b591b1d027f4ebec)
- `bu-cang-si-da-gong-kai` (ID: 698406d8b591b1d027f4ebf2)
- `bu-guo` (ID: 698406d8b591b1d027f4ebf8)
- `da-sheng-ji` (ID: 698406d9b591b1d027f4ebfe)
- `en-qun-shu-wei-x-google-xiao-xue-tang` (ID: 698406d9b591b1d027f4ec04)
- `en-qun-shu-wei-x-google-xiao-xue-tang-5` (ID: 698406d9b591b1d027f4ec0a)
- `en-qun-shu-wei-x-lian-shu-xiao-xue-tang` (ID: 698406d9b591b1d027f4ec10)
- `en-qun-shu-wei-x-metaverse` (ID: 698406dab591b1d027f4ec16)
- `en-qun-shu-wei-x-metaverse-bai-hua-wen` (ID: 698406dab591b1d027f4ec1c)
- `facebook-mjing-yin` (ID: 698406dab591b1d027f4ec22)
- `facebookfen` (ID: 698406dbb591b1d027f4ec28)
- `faceookshequz` (ID: 698406dbb591b1d027f4ec2e)
- `fu-ping-ye-shi-yi-zhong-shang-ji` (ID: 698406dbb591b1d027f4ec34)
- `genzhu` (ID: 698406dcb591b1d027f4ec3a)
- `google-comment-delete` (ID: 698406dcb591b1d027f4ec40)
- `googlecomment` (ID: 698406dcb591b1d027f4ec46)
- `googlemybusiness-optimization` (ID: 698406ddb591b1d027f4ec4c)
- `googleping` (ID: 698406ddb591b1d027f4ec52)
- `hu-nian-ji-xiang-hua-5` (ID: 698406ddb591b1d027f4ec58)
- `issue-with-gmb-verification` (ID: 698406ddb591b1d027f4ec5e)
- `ni-de-huang-jin-di-duan-zu-chuan-mi-fang-jiu-bu-xu-yao-wang-lu-xing-xiao-liao-ma-a` (ID: 698406deb591b1d027f4ec64)
- `ni-xin-ma-ni-xian-zai-kan-de-zhe-pian-wen-jia-zhi-2500-wan-tai-bi-5` (ID: 698406deb591b1d027f4ec6a)
- `optimize-gmb-for-local-seo` (ID: 698406deb591b1d027f4ec70)
- `paiming` (ID: 698406dfb591b1d027f4ec76)
- `shang-jia-guan-jian-zi` (ID: 698406dfb591b1d027f4ec7c)
- `shang-jia-jing-ying-mi-ji` (ID: 698406dfb591b1d027f4ec82)
- `shequnxingxiao` (ID: 698406e0b591b1d027f4ec88)
- `shitidianjia` (ID: 698406e0b591b1d027f4ec8e)
- `storytelling` (ID: 698406e0b591b1d027f4ec94)
- `xi-jing-biao-ti-de-5-ge-jue-qiao-7` (ID: 698406e0b591b1d027f4ec9a)
- `xiugai-google` (ID: 698406e1b591b1d027f4eca0)
- `zheng-que-de-hashtag-dai-ni-shang-tian-tang` (ID: 698406e1b591b1d027f4eca6)

View File

@@ -0,0 +1,9 @@
// Deployment fix-up: rewrites package.json from a saved original, stripping
// dev-only fields and repointing the workspace dependency at a vendored copy.
const fs = require('fs');

const pkg = JSON.parse(fs.readFileSync('./package.json.orig', 'utf8'));

// Fields not needed at runtime in the deployed artifact.
delete pkg.devDependencies;
delete pkg.engines;

// The workspace protocol cannot resolve outside the monorepo; point the
// shared package at the local copy bundled alongside this package.json.
if (pkg.dependencies?.['@enchun/shared']) {
  pkg.dependencies['@enchun/shared'] = 'file:./shared';
}

fs.writeFileSync('./package.json', JSON.stringify(pkg, null, 2));
fs.unlinkSync('./package.json.orig');

View File

@@ -0,0 +1,50 @@
{
"_comment": "Sample Webflow export data for testing migration script",
"_instructions": "Copy this file to webflow-export.json and fill in your actual data from Webflow",
"categories": [
{
"name": "Google小學堂",
"slug": "google-workshop",
"colorHex": "#4285f4"
},
{
"name": "Meta小學堂",
"slug": "meta-workshop",
"colorHex": "#0668e1"
},
{
"name": "行銷時事最前線",
"slug": "marketing-news",
"colorHex": "#34a853"
},
{
"name": "恩群數位最新公告",
"slug": "enchun-announcements",
"colorHex": "#ea4335"
}
],
"posts": [
{
"title": "示例文章標題",
"slug": "sample-post",
"content": "<p>這是文章內容...</p>",
"publishedDate": "2024-01-15T10:00:00Z",
"postCategory": "google-workshop",
"featuredImage": "https://example.com/image.jpg",
"seoTitle": "SEO 標題",
"seoDescription": "SEO 描述",
"excerpt": "文章摘要..."
}
],
"portfolio": [
{
"name": "示例網站名稱",
"slug": "sample-portfolio",
"websiteLink": "https://example.com",
"previewImage": "https://example.com/preview.jpg",
"description": "專案描述...",
"websiteType": "corporate",
"tags": "電商, SEO, 網站設計"
}
]
}

View File

@@ -1,23 +1,40 @@
import { withPayload } from '@payloadcms/next/withPayload'
import { fileURLToPath } from 'node:url'
import { dirname, join } from 'node:path'
import redirects from './redirects.js'
const __dirname = dirname(fileURLToPath(import.meta.url))
const NEXT_PUBLIC_SERVER_URL = process.env.VERCEL_PROJECT_PRODUCTION_URL
? `https://${process.env.VERCEL_PROJECT_PRODUCTION_URL}`
: undefined || process.env.__NEXT_PRIVATE_ORIGIN || 'http://localhost:3000'
/** @type {import('next').NextConfig} */
const nextConfig = {
output: 'standalone',
// Required for monorepo: trace dependencies from the monorepo root
outputFileTracingRoot: join(__dirname, '../../'),
eslint: { ignoreDuringBuilds: true },
typescript: { ignoreBuildErrors: true },
images: {
remotePatterns: [
...[NEXT_PUBLIC_SERVER_URL /* 'https://example.com' */].map((item) => {
const url = new URL(item)
...[NEXT_PUBLIC_SERVER_URL, process.env.NEXT_PUBLIC_SERVER_URL]
.filter(Boolean)
.map((item) => {
const urlString = item.startsWith('http') ? item : `https://${item}`
return {
hostname: url.hostname,
protocol: url.protocol.replace(':', ''),
}
}),
try {
const url = new URL(urlString)
return {
hostname: url.hostname,
protocol: url.protocol.replace(':', ''),
}
} catch (_) {
return null
}
})
.filter(Boolean),
],
},
webpack: (webpackConfig) => {

View File

@@ -8,7 +8,7 @@
"scripts": {
"build": "cross-env NODE_OPTIONS=--no-deprecation next build",
"postbuild": "next-sitemap --config next-sitemap.config.cjs",
"dev": "cross-env NODE_OPTIONS=--no-deprecation next dev",
"dev": "cross-env NODE_OPTIONS=--no-deprecation next dev --port 3000",
"dev:prod": "cross-env NODE_OPTIONS=--no-deprecation rm -rf .next && pnpm build && pnpm start",
"generate:importmap": "cross-env NODE_OPTIONS=--no-deprecation payload generate:importmap",
"generate:types": "cross-env NODE_OPTIONS=--no-deprecation payload generate:types",
@@ -25,7 +25,10 @@
"test:load:all": "k6 run tests/k6/public-browsing.js && k6 run tests/k6/api-performance.js",
"test:load:admin": "k6 run tests/k6/admin-operations.js",
"test:load:api": "k6 run tests/k6/api-performance.js",
"typecheck": "tsc --noEmit"
"typecheck": "tsc --noEmit",
"migrate": "tsx scripts/migration/migrate.ts",
"migrate:dry": "tsx scripts/migration/migrate.ts --dry-run --verbose",
"migrate:posts": "tsx scripts/migration/migrate.ts --collection posts"
},
"dependencies": {
"@enchun/shared": "workspace:*",
@@ -74,15 +77,19 @@
"@types/react-dom": "19.1.6",
"@vitejs/plugin-react": "4.5.2",
"autoprefixer": "^10.4.19",
"cheerio": "^1.2.0",
"copyfiles": "^2.4.1",
"csv-parse": "^6.1.0",
"eslint": "^9.16.0",
"eslint-config-next": "15.4.4",
"html-parse-stringify": "^3.0.1",
"jsdom": "26.1.0",
"playwright": "1.54.1",
"playwright-core": "1.54.1",
"postcss": "^8.4.38",
"prettier": "^3.4.2",
"tailwindcss": "^3.4.3",
"tsx": "^4.21.0",
"typescript": "5.7.3",
"vite-tsconfig-paths": "5.1.4",
"vitest": "3.2.3"

View File

@@ -150,6 +150,9 @@ importers:
eslint-config-next:
specifier: 15.4.4
version: 15.4.4(eslint@9.37.0(jiti@1.21.7))(typescript@5.7.3)
html-parse-stringify:
specifier: ^3.0.1
version: 3.0.1
jsdom:
specifier: 26.1.0
version: 26.1.0
@@ -3387,6 +3390,9 @@ packages:
resolution: {integrity: sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==}
engines: {node: '>=18'}
html-parse-stringify@3.0.1:
resolution: {integrity: sha512-KknJ50kTInJ7qIScF3jeaFRpMpE8/lfiTdzf/twXyPBLAGrLRTmkz3AdTnKeh40X8k9L2fdYwEp/42WGXIRGcg==}
http-proxy-agent@7.0.2:
resolution: {integrity: sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==}
engines: {node: '>= 14'}
@@ -5055,6 +5061,10 @@ packages:
jsdom:
optional: true
void-elements@3.1.0:
resolution: {integrity: sha512-Dhxzh5HZuiHQhbvTW9AMetFfBHDMYpo23Uo9btPXgdYP+3T5S+p+jgNy7spra+veYhBP2dCSgxR/i2Y02h5/6w==}
engines: {node: '>=0.10.0'}
w3c-xmlserializer@5.0.0:
resolution: {integrity: sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==}
engines: {node: '>=18'}
@@ -9065,6 +9075,10 @@ snapshots:
dependencies:
whatwg-encoding: 3.1.1
html-parse-stringify@3.0.1:
dependencies:
void-elements: 3.1.0
http-proxy-agent@7.0.2:
dependencies:
agent-base: 7.1.4
@@ -11057,6 +11071,8 @@ snapshots:
- tsx
- yaml
void-elements@3.1.0: {}
w3c-xmlserializer@5.0.0:
dependencies:
xml-name-validator: 5.0.0

View File

@@ -0,0 +1,190 @@
# Webflow to Payload CMS Migration Script
Story 1.3: Content Migration Script
## Overview
This script migrates content from Webflow CMS to Payload CMS. It supports:
- **JSON export** - If you have Webflow JSON export files
- **HTML parsing** - If you only have access to the public website HTML
- **Manual entry** - You can manually edit the JSON data file
## Prerequisites
1. **MongoDB must be running** - The script connects to Payload CMS which requires MongoDB
2. **Environment variables** - Ensure `.env` file has PAYLOAD_SECRET and DATABASE_URI
3. **Source data** - Prepare your webflow-export.json file
## Quick Start
```bash
# Navigate to backend directory
cd apps/backend
# Ensure MongoDB is running (if using local)
# Or the Payload CMS dev server:
pnpm dev
# In another terminal, run dry-run (preview mode, no changes)
pnpm migrate:dry
# Run actual migration
pnpm migrate
# Migrate specific collection
pnpm migrate:posts
# Show help
tsx scripts/migration/migrate.ts --help
```
## Environment Setup
The script loads environment variables from:
- `.env` (project root)
- `.env.enchun-cms-v2` (project root)
- `apps/backend/.env`
Required variables:
```bash
PAYLOAD_SECRET=your-secret-key
DATABASE_URI=mongodb://localhost:27017/your-db
R2_ACCOUNT_ID=your-r2-account
R2_ACCESS_KEY_ID=your-access-key
R2_SECRET_ACCESS_KEY=your-secret-key
R2_BUCKET=your-bucket-name
```
## CLI Options
| Option | Short | Description |
|--------|-------|-------------|
| `--dry-run` | `-n` | Run without making changes (preview mode) |
| `--verbose` | `-v` | Show detailed logging output |
| `--force` | `-f` | Overwrite existing items (skip deduplication) |
| `--collection <name>` | `-c` | Specific collection: categories\|posts\|portfolio\|all |
| `--source <path>` | `-s` | Path to export file (default: ./data/webflow-export.json) |
| `--batch-size <num>` | | Batch size for parallel processing (default: 5) |
| `--help` | `-h` | Show help message |
## Data Format
Create a `webflow-export.json` file in the `apps/backend/data/` directory:
```json
{
"categories": [
{
"name": "分類名稱(中文)",
"slug": "category-slug",
"colorHex": "#0066cc"
}
],
"posts": [
{
"title": "文章標題",
"slug": "post-slug",
"content": "<p>HTML content...</p>",
"publishedDate": "2024-01-15T10:00:00Z",
"postCategory": "category-slug",
"featuredImage": "https://example.com/image.jpg",
"seoTitle": "SEO Title",
"seoDescription": "SEO Description",
"excerpt": "Article excerpt..."
}
],
"portfolio": [
{
"name": "作品名稱",
"slug": "portfolio-slug",
"websiteLink": "https://example.com",
"previewImage": "https://example.com/preview.jpg",
"description": "作品描述",
"websiteType": "corporate",
"tags": "tag1, tag2, tag3"
}
]
}
```
## Field Mappings
### Categories
| Webflow Field | Payload Field |
|---------------|---------------|
| name | title |
| slug | slug (preserved) |
| color-hex | textColor + backgroundColor |
### Posts
| Webflow Field | Payload Field |
|---------------|---------------|
| title | title |
| slug | slug (preserved for SEO) |
| body | content (HTML → Lexical) |
| published-date | publishedAt |
| post-category | categories (relationship) |
| featured-image | heroImage (R2 upload) |
| seo-title | meta.title |
| seo-description | meta.description |
### Portfolio
| Webflow Field | Payload Field |
|---------------|---------------|
| Name | title |
| Slug | slug |
| website-link | url |
| preview-image | image (R2 upload) |
| description | description |
| website-type | websiteType |
| tags | tags (array) |
## Migration Order
1. **Categories** (first - no dependencies)
2. **Media** images (independent)
3. **Posts** (depends on Categories and Media)
4. **Portfolio** (depends on Media)
## Reports
After each migration, a report is generated in `apps/backend/reports/`:
- `migration-YYYY-MM-DD.json` - Machine-readable JSON
- `migration-YYYY-MM-DD.md` - Human-readable Markdown
## Troubleshooting
### Script fails to connect to Payload CMS
Ensure the Payload CMS server is running:
```bash
cd apps/backend
pnpm dev
```
### Images not uploading
Check environment variables in `.env`:
- `R2_ACCOUNT_ID`
- `R2_ACCESS_KEY_ID`
- `R2_SECRET_ACCESS_KEY`
- `R2_BUCKET`
### Duplicate entries
By default, the script skips existing items. Use `--force` to overwrite:
```bash
pnpm migrate --force
```
## Module Structure
```
scripts/migration/
├── migrate.ts # Main entry point
├── types.ts # TypeScript interfaces
├── utils.ts # Helper functions
├── transformers.ts # Data transformation
├── mediaHandler.ts # Image download/upload
├── deduplicator.ts # Duplicate checking
├── reporter.ts # Report generation
├── htmlParser.ts # HTML parsing (no JSON)
└── README.md # This file
```

View File

@@ -0,0 +1,59 @@
#!/usr/bin/env tsx
/**
 * Debug script: compares one post that migrated successfully against one
 * that failed "Content > Content" validation, by converting both from
 * Webflow CSV HTML to Lexical JSON and printing structural statistics.
 * Read-only; prints to the console.
 */
import { config as dotenvConfig } from 'dotenv'
import { resolve, dirname } from 'path'
import { fileURLToPath } from 'url'
const __filename = fileURLToPath(import.meta.url)
const __dirname = dirname(__filename)
const envPath = resolve(__dirname, '../../.env')
dotenvConfig({ path: envPath })
import { parseWebflowCSV } from './csvParser'
import { htmlToLexical } from './lexicalConverter'

async function main() {
  const data = await parseWebflowCSV('/Users/pukpuk/Dev/website-enchun-mgr/恩群數位行銷 - 行銷放大鏡集.csv')
  const successPost = data.posts.find((p: any) => p.title === '正確的 hashtag 帶你上天堂')
  const failPost = data.posts.find((p: any) => p.title.includes('一點都不難'))
  // .find() returns undefined when a title is absent from the export; fail
  // fast with a clear message instead of crashing on property access below.
  if (!successPost || !failPost) {
    throw new Error('Could not locate both comparison posts in the CSV export')
  }

  console.log('=== SUCCESSFUL POST ===')
  console.log('Title:', successPost.title)
  console.log('HTML content length:', successPost.content?.length)
  const successLexical = htmlToLexical(successPost.content || '')
  console.log('Lexical JSON length:', successLexical.length)
  const successParsed = JSON.parse(successLexical)
  console.log('Lexical children count:', successParsed.root?.children?.length)

  console.log('\n=== FAILED POST ===')
  console.log('Title:', failPost.title)
  console.log('HTML content length:', failPost.content?.length)
  const failLexical = htmlToLexical(failPost.content || '')
  console.log('Lexical JSON length:', failLexical.length)
  const failParsed = JSON.parse(failLexical)
  console.log('Lexical children count:', failParsed.root?.children?.length)

  // Check for special characters in HTML.
  // BUGFIX: the 'g' flag is required here — without it String.prototype.match
  // returns at most a single match, so both counts were always 0 or 1 rather
  // than the actual number of special characters.
  console.log('\n=== CHARACTER CHECK ===')
  const specialChars = /["\n\r\t]/g
  const failMatches = (failPost.content?.match(specialChars) || []).length
  const successMatches = (successPost.content?.match(specialChars) || []).length
  console.log('Special chars in fail post:', failMatches)
  console.log('Special chars in success post:', successMatches)

  // Look for empty text nodes (a known cause of Lexical validation errors).
  // Only scans two levels deep, matching the typical paragraph > text shape.
  let emptyTextCount = 0
  failParsed.root?.children?.forEach((child: any) => {
    child.children?.forEach((grandchild: any) => {
      if (grandchild.type === 'text' && grandchild.text === '') {
        emptyTextCount++
      }
    })
  })
  console.log('Empty text nodes in fail post:', emptyTextCount)
}
main().catch(console.error)

View File

@@ -0,0 +1,97 @@
#!/usr/bin/env tsx
/**
 * Analyze Post Data Structure
 * Compares migrated posts vs manually created posts
 *
 * Read-only diagnostic: connects to the local Payload instance, fetches up
 * to five posts, and dumps the first one's field types and Lexical content
 * structure to the console. Intended for debugging the "Content > Content"
 * validation failures seen during migration.
 */
import { config as dotenvConfig } from 'dotenv'
dotenvConfig({ path: '.env' })
import { getPayload } from 'payload'
import config from '../../src/payload.config'

async function main() {
  const payload = await getPayload({ config })
  console.log('🔍 Fetching posts for analysis...\n')
  // depth: 0 keeps relationship fields as raw IDs (no population),
  // so the content field is inspected exactly as stored.
  const posts = await payload.find({
    collection: 'posts',
    limit: 5,
    depth: 0,
  })
  if (posts.docs.length === 0) {
    console.log('No posts found')
    return
  }
  // Analyze first post in detail
  const post = posts.docs[0]
  console.log('═══════════════════════════════════════════════════════════')
  console.log(`POST: "${post.title}"`)
  console.log('═══════════════════════════════════════════════════════════\n')
  // Basic info
  console.log('📋 BASIC INFO:')
  console.log(` ID: ${post.id}`)
  console.log(` Slug: ${post.slug}`)
  // NOTE(review): this reads post.status — confirm it matches the
  // collection's field name (Payload's draft system exposes '_status').
  console.log(` Status: ${post.status}`)
  console.log(` Created: ${post.createdAt}`)
  // Content analysis: the content may be stored either as a JSON string
  // (seen in some migrated posts) or as a live object — handle both.
  console.log('\n📝 CONTENT FIELD:')
  console.log(` Type: ${typeof post.content}`)
  console.log(` Is String: ${typeof post.content === 'string'}`)
  console.log(` Is Object: ${typeof post.content === 'object'}`)
  if (typeof post.content === 'string') {
    console.log(` String Length: ${post.content.length} chars`)
    try {
      // If the string parses as JSON, report its Lexical node structure.
      const parsed = JSON.parse(post.content)
      console.log(` Parsed Type: ${parsed?.type}`)
      console.log(` Parsed Version: ${parsed?.version}`)
      console.log(` Children Count: ${parsed?.children?.length}`)
      // Show first child structure
      if (parsed?.children?.[0]) {
        console.log('\n First Child:')
        const firstChild = parsed.children[0]
        console.log(` Type: ${firstChild.type}`)
        console.log(` Version: ${firstChild.version}`)
        if (firstChild.children) {
          console.log(` Has Children: true (${firstChild.children.length})`)
          if (firstChild.children[0]) {
            // Re-indent the pretty-printed JSON so it nests under this label.
            console.log(` First Grandchild: ${JSON.stringify(firstChild.children[0], null, 2).split('\n').join('\n ')}`)
          }
        }
      }
      // Show full structure
      console.log('\n FULL LEXICAL STRUCTURE:')
      console.log(' ' + JSON.stringify(parsed, null, 2).split('\n').join('\n '))
    } catch (e) {
      // Not valid JSON — show the parse error plus a sample of the raw value.
      console.log(` Parse Error: ${e}`)
      console.log(` Raw Content (first 500 chars): ${post.content.substring(0, 500)}...`)
    }
  } else if (typeof post.content === 'object') {
    // Already an object (the expected shape for richText fields).
    console.log(' OBJECT STRUCTURE:')
    console.log(' ' + JSON.stringify(post.content, null, 2).split('\n').join('\n '))
  }
  // Other fields
  console.log('\n🏷 OTHER FIELDS:')
  console.log(` Excerpt: ${post.excerpt?.substring(0, 100) || 'none'}...`)
  console.log(` PublishedAt: ${post.publishedAt}`)
  console.log(` Categories: ${post.categories?.length || 0} items`)
  if (post.heroImage) {
    console.log(` HeroImage: ${typeof post.heroImage} = ${post.heroImage}`)
  }
  console.log('\n═══════════════════════════════════════════════════════════')
}
main().catch(console.error)

View File

@@ -0,0 +1,48 @@
#!/usr/bin/env tsx
import { config as dotenvConfig } from 'dotenv'
import { resolve, dirname } from 'path'
import { fileURLToPath } from 'url'
const __filename = fileURLToPath(import.meta.url)
const __dirname = dirname(__filename)
const envPath = resolve(__dirname, '../../.env')
dotenvConfig({ path: envPath })
import { parseWebflowCSV } from './csvParser'
import { htmlToLexical } from './lexicalConverter'
/**
 * Debug script: inspect the link nodes produced by htmlToLexical for one post.
 *
 * Finds the post whose title contains 掌握故事行銷, converts its HTML content
 * to Lexical JSON, walks the node tree printing every link URL with a
 * validity verdict, then lists the raw <a href> values found in the HTML
 * for comparison.
 */
async function main() {
  const data = await parseWebflowCSV('/Users/pukpuk/Dev/website-enchun-mgr/恩群數位行銷 - 行銷放大鏡集.csv')
  const post = data.posts.find((p: any) => p.title.includes('掌握故事行銷'))
  // Guard: .find() returns undefined when no title matches; the original
  // crashed with a TypeError on post.content below.
  if (!post) {
    console.error('Post not found: 掌握故事行銷')
    return
  }
  const lexical = htmlToLexical(post.content || '')
  const parsed = JSON.parse(lexical)
  console.log('All link URLs:')
  // Recursively walk the Lexical tree; depth-capped at 10 so a malformed
  // (e.g. cyclic) structure cannot cause unbounded recursion.
  const findLinks = (nodes: any[], depth = 0) => {
    if (depth > 10) return
    nodes.forEach((node: any, i: number) => {
      if (node.type === 'link') {
        const url = node.url
        // A link is "valid" when non-empty, not a bare '#', and either
        // absolute (http/https) or site-relative (leading '/').
        const isValid = url && url !== '#' && (url.startsWith('http://') || url.startsWith('https://') || url.startsWith('/'))
        console.log(` [${depth}.${i}] ${url} - Valid: ${isValid}`)
      }
      if (node.children) {
        findLinks(node.children, depth + 1)
      }
    })
  }
  findLinks(parsed.root?.children || [])
  // Cross-check: extract hrefs straight from the source HTML so links lost
  // during conversion can be spotted.
  console.log('\nRaw HTML links:')
  const linkRegex = /<a[^>]+href=["']([^"']+)["'][^>]*>/gi
  let match
  const html = post.content || ''
  while ((match = linkRegex.exec(html)) !== null) {
    console.log(' ', match[1])
  }
}
main().catch(console.error)

View File

@@ -0,0 +1,40 @@
#!/usr/bin/env tsx
import { config as dotenvConfig } from 'dotenv'
dotenvConfig({ path: '.env' })
import { parseWebflowCSV } from './csvParser'
import { htmlToLexical } from './lexicalConverter'
/**
 * Debug script: convert one post that migrated successfully and one that
 * failed, and compare the top-level Lexical structures side by side.
 */
async function main() {
  const data = await parseWebflowCSV('/Users/pukpuk/Dev/website-enchun-mgr/恩群數位行銷 - 行銷放大鏡集.csv')
  const successPost = data.posts.find((p: any) => p.title === '正確的 hashtag 帶你上天堂')
  const failPost = data.posts.find((p: any) => p.title.includes('一點都不難'))
  // Guard: .find() returns undefined on a miss; the original crashed with
  // a TypeError on successPost.title / failPost.title below.
  if (!successPost || !failPost) {
    console.error('Could not find both posts:', { success: !!successPost, fail: !!failPost })
    return
  }
  console.log('=== SUCCESSFUL POST ===')
  console.log('Title:', successPost.title)
  console.log('Content length:', successPost.content?.length)
  const successLexical = htmlToLexical(successPost.content || '')
  const successParsed = JSON.parse(successLexical)
  console.log('Has root:', successParsed.root !== undefined)
  console.log('Root type:', successParsed.root?.type)
  console.log('Root children count:', successParsed.root?.children?.length)
  console.log('\n=== FAILED POST ===')
  console.log('Title:', failPost.title)
  console.log('Content length:', failPost.content?.length)
  const failLexical = htmlToLexical(failPost.content || '')
  const failParsed = JSON.parse(failLexical)
  console.log('Has root:', failParsed.root !== undefined)
  console.log('Root type:', failParsed.root?.type)
  console.log('Root children count:', failParsed.root?.children?.length)
  // Compare the first child node of each document for structural drift.
  console.log('\n=== STRUCTURE COMPARISON ===')
  console.log('Success first child type:', successParsed.root?.children?.[0]?.type)
  console.log('Fail first child type:', failParsed.root?.children?.[0]?.type)
}
main().catch(console.error)

View File

@@ -0,0 +1,307 @@
/**
* CSV Parser for Webflow Exports
* Story 1.3: Content Migration Script
*
* Parses Webflow CSV export files and converts to WebflowExportData format
*/
import type { WebflowExportData, WebflowPost, WebflowCategory } from './types'
import { readFile } from 'fs/promises'
import { parse } from 'csv-parse/sync'
// ============================================================
// CSV ROW INTERFACES
// ============================================================
// Shape of one row of the Webflow posts-collection CSV export.
// Property names are the literal column headers emitted by Webflow
// (several are Chinese); all CSV values arrive as strings.
interface WebflowPostCsvRow {
  '文章標題': string // post title
  'Slug': string
  'Collection ID': string
  'Item ID': string
  'Archived': string // 'true'/'false' as text
  'Draft': string
  'Created On': string
  'Updated On': string
  'Published On': string
  '強調圖片': string // featured image URL
  'Open Graph 顯示圖片': string // Open Graph image URL
  '文章簡述': string // excerpt / SEO description
  '發文日期': string // publish date
  '文章分類': string // category name
  '發文內容': string // post body HTML
  '是否放在頁尾': string // "show in footer" flag
}
// Shape of one row of a categories-collection CSV export.
// Extra columns are tolerated via the index signature.
interface WebflowCategoryCsvRow {
  name: string
  slug: string
  [key: string]: string
}
// Shape of one row of a portfolio-collection CSV export.
// Extra columns are tolerated via the index signature.
interface WebflowPortfolioCsvRow {
  Name: string
  Slug: string
  'website-link': string
  'preview-image': string
  description: string
  'website-type': string // mapped to an enum by parsePortfolioCSV
  tags: string
  [key: string]: string
}
// ============================================================
// MAIN CSV PARSER
// ============================================================
/**
 * Parse a Webflow CSV export file into WebflowExportData.
 *
 * The collection type is detected from the file name first and then from
 * the column headers of the first record; unrecognised files yield an
 * empty dataset.
 */
export async function parseWebflowCSV(filePath: string): Promise<WebflowExportData> {
  const csvText = await readFile(filePath, 'utf-8')
  const records: any[] = parse(csvText, {
    columns: true,
    skip_empty_lines: true,
    trim: true,
  })
  const firstRecord = records[0]
  // Posts export: matched by the Chinese file name or the title column.
  if (filePath.includes('行銷放大鏡集') || firstRecord?.['文章標題']) {
    return parsePostsCSV(records as WebflowPostCsvRow[])
  }
  // Categories export, matched by file name.
  if (filePath.includes('Categories') || filePath.includes('分類')) {
    return parseCategoriesCSV(records as WebflowCategoryCsvRow[])
  }
  // Portfolio export, matched by file name.
  if (filePath.includes('Portfolio') || filePath.includes('作品')) {
    return parsePortfolioCSV(records as WebflowPortfolioCsvRow[])
  }
  // Fallback: infer a posts export from the record structure alone.
  if (firstRecord?.['文章標題'] || firstRecord?.['發文內容']) {
    return parsePostsCSV(records as WebflowPostCsvRow[])
  }
  return { posts: [], categories: [], portfolio: [] }
}
// ============================================================
// POSTS CSV PARSER
// ============================================================
/**
 * Convert rows of the posts-collection CSV into WebflowExportData.
 *
 * Archived rows are dropped, category names are collected from the rows,
 * and matching category records are generated alongside the posts.
 */
function parsePostsCSV(records: WebflowPostCsvRow[]): WebflowExportData {
  const categoryNames = new Set<string>()
  const posts: WebflowPost[] = []
  for (const row of records) {
    // Archived entries are excluded from the migration.
    if (row.Archived === 'true') continue
    const categoryName = row['文章分類'] || ''
    if (categoryName) categoryNames.add(categoryName)
    // Prefer the explicit publish-date column, then Webflow's own timestamps.
    const publishedDate = parseWebflowDate(row['發文日期'] || row['Published On'] || row['Created On'])
    posts.push({
      title: row['文章標題'] || '',
      slug: row.Slug || '',
      content: row['發文內容'] || '',
      publishedDate,
      postCategory: categoryName || undefined,
      featuredImage: row['強調圖片'] || undefined,
      seoTitle: undefined, // not present as a CSV column; could be derived from content
      seoDescription: row['文章簡述'] || undefined,
      excerpt: row['文章簡述'] || undefined,
    })
  }
  return {
    posts,
    categories: generateCategoriesFromPosts([...categoryNames]),
    portfolio: [],
  }
}
// ============================================================
// CATEGORIES CSV PARSER
// ============================================================
/**
 * Convert rows of the categories-collection CSV into WebflowExportData.
 *
 * Known category slugs/names receive their brand colour; everything else
 * falls back to the default blue (#0066cc).
 */
function parseCategoriesCSV(records: WebflowCategoryCsvRow[]): WebflowExportData {
  // Brand colours for the known categories (from story requirements),
  // keyed by slug and, for one entry, by the Chinese display name.
  const knownCategories: Record<string, string> = {
    'google-xiao-xue-tang': '#4285f4', // Google blue
    'google-workshop': '#4285f4',
    'meta-xiao-xue-tang': '#0668e1', // Meta blue
    'meta-workshop': '#0668e1',
    'xing-xiao-shi-shi-zui-qian-xian': '#34a853', // Green
    'marketing-news': '#34a853',
    'enchun-announcements': '#ea4335', // Red
    '恩群數位最新公告': '#ea4335',
  }
  const categories: WebflowCategory[] = records.map((row) => {
    const name = row.name || ''
    const slug = row.slug || ''
    return {
      name,
      slug,
      colorHex: knownCategories[slug] || knownCategories[name] || '#0066cc',
    }
  })
  return { posts: [], categories, portfolio: [] }
}
// ============================================================
// PORTFOLIO CSV PARSER
// ============================================================
/**
 * Convert rows of the portfolio-collection CSV into WebflowExportData.
 *
 * The 'website-type' column is normalised to a known enum value; unknown
 * or missing types fall back to 'other'.
 */
function parsePortfolioCSV(records: WebflowPortfolioCsvRow[]): WebflowExportData {
  // Map website type strings to enum values. Hoisted out of the loop —
  // the original rebuilt this constant object on every iteration.
  const typeMapping: Record<string, 'corporate' | 'ecommerce' | 'landing' | 'brand' | 'other'> = {
    'corporate': 'corporate',
    'ecommerce': 'ecommerce',
    'landing': 'landing',
    'brand': 'brand',
  }
  const portfolio: any[] = []
  for (const row of records) {
    const websiteType = typeMapping[row['website-type']?.toLowerCase()] || 'other'
    portfolio.push({
      name: row.Name || '',
      slug: row.Slug || '',
      websiteLink: row['website-link'] || '',
      previewImage: row['preview-image'] || '',
      description: row.description || '',
      websiteType,
      tags: row.tags || '',
    })
  }
  return { posts: [], categories: [], portfolio }
}
// ============================================================
// HELPER FUNCTIONS
// ============================================================
/**
 * Parse a Webflow-format date string into a Date.
 *
 * Webflow emits dates like
 * "Thu Jan 20 2022 00:00:00 GMT+0000 (Coordinated Universal Time)";
 * the trailing parenthesised timezone name is stripped before parsing.
 * Missing or unparseable input falls back to the current time.
 */
function parseWebflowDate(dateStr: string): Date {
  if (!dateStr) return new Date()
  // Drop the "(…)" timezone annotation, then let Date handle the rest.
  const withoutTzName = dateStr.replace(/\(.*\)$/, '').trim()
  const result = new Date(withoutTzName)
  return Number.isNaN(result.getTime()) ? new Date() : result
}
/**
 * Build WebflowCategory records for the category names found on posts.
 *
 * Known names keep their canonical slug and brand colour; anything else
 * is slugified with toSlug and given the default blue.
 */
function generateCategoriesFromPosts(categoryNames: string[]): WebflowCategory[] {
  // Canonical slugs for the known (Chinese-named) categories.
  const nameToSlug: Record<string, string> = {
    'Google小學堂': 'google-xiao-xue-tang',
    'Meta小學堂': 'meta-xiao-xue-tang',
    '行銷時事最前線': 'xing-xiao-shi-shi-zui-qian-xian',
    '恩群數位最新公告': 'enchun-announcements',
  }
  // Brand colours for the known slugs.
  const slugToColor: Record<string, string> = {
    'google-xiao-xue-tang': '#4285f4',
    'meta-xiao-xue-tang': '#0668e1',
    'xing-xiao-shi-shi-zui-qian-xian': '#34a853',
    'enchun-announcements': '#ea4335',
  }
  // De-duplicate by name while preserving first-seen order.
  const uniqueNames = [...new Set(categoryNames)]
  return uniqueNames.map((name) => {
    const slug = nameToSlug[name] || toSlug(name)
    return { name, slug, colorHex: slugToColor[slug] || '#0066cc' }
  })
}
/**
 * Convert a string to a URL-friendly slug.
 *
 * Latin characters are lowercased and stripped of diacritics (NFD
 * decomposition, combining marks removed); CJK ideographs, '/', and '-'
 * are preserved; every other character becomes a dash. Runs of dashes
 * collapse and leading/trailing dashes are trimmed.
 */
function toSlug(value: string): string {
  let slug = value.toString().toLowerCase().trim()
  // Decompose accented letters and drop the combining marks (é → e).
  slug = slug.normalize('NFD').replace(/[\u0300-\u036f]/g, '')
  return slug
    .replace(/[^a-z0-9\u4e00-\u9fa5/-]/g, '-')
    .replace(/-+/g, '-')
    .replace(/^-+|-+$/g, '')
}
// ============================================================
// BATCH CSV PARSER
// ============================================================
/**
 * Parse several Webflow CSV exports and merge them into one dataset.
 *
 * Files that fail to parse are logged and skipped. Categories are
 * de-duplicated by slug, keeping the first occurrence.
 */
export async function parseMultipleCSVs(filePaths: string[]): Promise<WebflowExportData> {
  const merged: WebflowExportData = {
    posts: [],
    categories: [],
    portfolio: [],
  }
  for (const filePath of filePaths) {
    try {
      const parsed = await parseWebflowCSV(filePath)
      merged.posts?.push(...(parsed.posts ?? []))
      merged.categories?.push(...(parsed.categories ?? []))
      merged.portfolio?.push(...(parsed.portfolio ?? []))
    } catch (error) {
      console.error(`Error parsing ${filePath}:`, error)
    }
  }
  // Drop duplicate categories (same slug may appear in multiple files).
  if (merged.categories) {
    const seenSlugs = new Set<string>()
    merged.categories = merged.categories.filter((category) => {
      if (seenSlugs.has(category.slug)) return false
      seenSlugs.add(category.slug)
      return true
    })
  }
  return merged
}

View File

@@ -0,0 +1,54 @@
#!/usr/bin/env tsx
import { config as dotenvConfig } from 'dotenv'
dotenvConfig({ path: '.env' })
import { getPayload } from 'payload'
import config from '../../src/payload.config'
import { parseWebflowCSV } from './csvParser'
import { transformPosts } from './transformers'
/**
 * Debug script: create a post WITHOUT richText content first, then attach
 * the transformed content in a second update — isolates whether the
 * "Content" validation failure comes from the content field or elsewhere.
 */
async function main() {
  const payload = await getPayload({ config })
  const data = await parseWebflowCSV('/Users/pukpuk/Dev/website-enchun-mgr/恩群數位行銷 - 行銷放大鏡集.csv')
  const post = data.posts.find((p: any) => p.title.includes('一點都不難'))
  if (!post) {
    console.log('Post not found')
    return
  }
  const [transformed] = transformPosts([post])
  console.log('Title:', transformed.title)
  console.log('Content type:', typeof transformed.content)
  try {
    // Step 1: create a draft without content. A '-test' suffix keeps the
    // slug from colliding with the real migrated post.
    const created = await payload.create({
      collection: 'posts',
      data: {
        title: transformed.title,
        slug: transformed.slug + '-test',
        status: 'draft',
      },
    })
    console.log('Created without content:', created.id)
    // Step 2: attach the content in a separate update.
    await payload.update({
      collection: 'posts',
      id: created.id,
      data: { content: transformed.content },
    })
    console.log('Updated with content successfully!')
    // Step 3: read the document back and report the stored content type.
    const updated = await payload.findByID({ collection: 'posts', id: created.id, depth: 0 })
    console.log('Verified content type:', typeof updated.content)
  } catch (error: any) {
    console.log('Error:', error.message)
  }
}
main().catch(console.error)

View File

@@ -0,0 +1,144 @@
/**
* Deduplication Module
* Story 1.3: Content Migration Script
*
* Checks for existing items to prevent duplicates
*/
import type { Payload } from 'payload'
// Options accepted by the duplicate-check helpers in this module.
export interface DuplicateCheckOptions {
  force?: boolean // Skip deduplication check
}
// ============================================================
// FIND EXISTING BY SLUG
// ============================================================
/**
 * Look up a document by slug in the given collection.
 *
 * Returns { exists: true, id } on a hit; { exists: false } on a miss.
 * Query errors are logged and reported as "does not exist" rather than
 * thrown, so a flaky lookup never aborts a migration run.
 */
export async function findBySlug(
  payload: Payload,
  collection: string,
  slug: string,
): Promise<{ exists: boolean; id?: string }> {
  try {
    const { docs } = await payload.find({
      collection,
      where: { slug: { equals: slug } },
      limit: 1,
      depth: 0,
    })
    const match = docs?.[0]
    return match ? { exists: true, id: match.id } : { exists: false }
  } catch (error) {
    console.error(`Error checking for duplicate ${collection} with slug "${slug}":`, error)
    return { exists: false }
  }
}
/**
 * Check whether a post exists with the given slug AND publish date.
 *
 * Query errors are logged and treated as "does not exist" so a flaky
 * lookup never aborts a migration run.
 */
export async function findBySlugAndDate(
  payload: Payload,
  slug: string,
  publishedAt: Date,
): Promise<{ exists: boolean; id?: string }> {
  try {
    const { docs } = await payload.find({
      collection: 'posts',
      where: {
        and: [{ slug: { equals: slug } }, { publishedAt: { equals: publishedAt } }],
      },
      limit: 1,
      depth: 0,
    })
    const match = docs?.[0]
    return match ? { exists: true, id: match.id } : { exists: false }
  } catch (error) {
    console.error(`Error checking for duplicate post with slug "${slug}":`, error)
    return { exists: false }
  }
}
// ============================================================
// BULK EXISTENCE CHECK
// ============================================================
/**
 * Fetch the slug of every document in a collection as a Set.
 *
 * Returns an empty set (after logging) when the query fails. The fixed
 * limit of 1000 assumes the collection holds at most ~1000 docs — TODO
 * confirm against production data size.
 */
export async function getAllSlugs(payload: Payload, collection: string): Promise<Set<string>> {
  try {
    const result = await payload.find({
      collection,
      limit: 1000, // Adjust based on expected data size
      depth: 0,
      select: { slug: true },
    })
    const docs = result.docs ?? []
    const slugValues = docs
      .map((doc: any) => doc?.slug)
      .filter((slug: any): slug is string => typeof slug === 'string')
    return new Set(slugValues)
  } catch (error) {
    console.error(`Error getting existing slugs for ${collection}:`, error)
    return new Set()
  }
}
/**
 * Build a map keyed by "<slug>-<publishedAt>" for every existing post.
 *
 * NOTE(review): at depth 0 the publishedAt value returned by the API is
 * likely an ISO string that is merely cast to Date here — confirm before
 * calling Date methods on the map values. Returns an empty map (after
 * logging) when the query fails.
 */
export async function getExistingPostIdentifiers(
  payload: Payload,
): Promise<Map<string, Date>> {
  try {
    const { docs } = await payload.find({
      collection: 'posts',
      limit: 1000,
      depth: 0,
      select: { slug: true, publishedAt: true },
    })
    const identifiers = new Map<string, Date>()
    for (const doc of docs ?? []) {
      const record = doc as any
      // Only index docs that carry both identifying fields.
      if ('slug' in record && 'publishedAt' in record) {
        identifiers.set(`${record.slug}-${record.publishedAt}`, record.publishedAt as Date)
      }
    }
    return identifiers
  } catch (error) {
    console.error('Error getting existing post identifiers:', error)
    return new Map()
  }
}

View File

@@ -0,0 +1,52 @@
#!/usr/bin/env tsx
import { config as dotenvConfig } from 'dotenv'
import { resolve, dirname } from 'path'
import { fileURLToPath } from 'url'
const __filename = fileURLToPath(import.meta.url)
const __dirname = dirname(__filename)
const envPath = resolve(__dirname, '../../.env')
dotenvConfig({ path: envPath })
import { parseWebflowCSV } from './csvParser'
import { htmlToLexical } from './lexicalConverter'
/**
 * Debug script: compare the Lexical text-node properties (format, detail,
 * mode) of a post that migrated successfully against one that failed
 * validation, to spot the structural difference.
 */
async function main() {
  const data = await parseWebflowCSV('/Users/pukpuk/Dev/website-enchun-mgr/恩群數位行銷 - 行銷放大鏡集.csv')
  const successPost = data.posts.find((p: any) => p.title === '正確的 hashtag 帶你上天堂')
  const failPost = data.posts.find((p: any) => p.title.includes('一點都不難'))
  // Guard: .find() returns undefined on a miss; the original crashed with
  // a TypeError on successPost.content / failPost.content below.
  if (!successPost || !failPost) {
    console.error('Could not find both posts:', { success: !!successPost, fail: !!failPost })
    return
  }
  const successLexical = htmlToLexical(successPost.content || '')
  const failLexical = htmlToLexical(failPost.content || '')
  const successParsed = JSON.parse(successLexical)
  const failParsed = JSON.parse(failLexical)
  // Check the actual content structure
  console.log('=== SUCCESSFUL POST FIRST PARAGRAPH ===')
  console.log(JSON.stringify(successParsed.root?.children?.[0], null, 2))
  console.log('\n=== FAILED POST FIRST PARAGRAPH ===')
  console.log(JSON.stringify(failParsed.root?.children?.[0], null, 2))
  // Look for differences in text node properties
  console.log('\n=== TEXT NODE COMPARISON ===')
  const successTextNode = successParsed.root?.children?.[0]?.children?.[0]
  const failTextNode = failParsed.root?.children?.[0]?.children?.[0]
  console.log('Success text node:', successTextNode)
  console.log('Fail text node:', failTextNode)
  // Check for format or detail properties
  console.log('\n=== PROPERTY CHECK ===')
  console.log('Success has format:', successTextNode?.format !== undefined)
  console.log('Fail has format:', failTextNode?.format !== undefined)
  console.log('Success has detail:', successTextNode?.detail !== undefined)
  console.log('Fail has detail:', failTextNode?.detail !== undefined)
  console.log('Success has mode:', successTextNode?.mode !== undefined)
  console.log('Fail has mode:', failTextNode?.mode !== undefined)
}
main().catch(console.error)

View File

@@ -0,0 +1,46 @@
#!/usr/bin/env tsx
/**
* Delete migrated posts and re-migrate with correct format
*/
import { config as dotenvConfig } from 'dotenv'
dotenvConfig({ path: '.env' })
import { getPayload } from 'payload'
import config from '../../src/payload.config'
/**
 * Debug script: delete every migrated post except the one titled
 * 'NEW POST', then print the command to re-run the migration with the
 * corrected content format.
 */
async function main() {
  const payload = await getPayload({ config })
  console.log('🗑️ Deleting migrated posts (except NEW POST)...')
  // Fetch up to 100 posts; 'NEW POST' is kept as the reference document.
  const { docs } = await payload.find({
    collection: 'posts',
    limit: 100,
    depth: 0,
  })
  let deleted = 0
  for (const doc of docs) {
    if (doc.title === 'NEW POST') continue
    try {
      await payload.delete({
        collection: 'posts',
        id: doc.id,
      })
    } catch (e) {
      // Revalidation hooks can throw after the row is already removed;
      // treat that as a successful delete.
      console.log(`Deleted: ${doc.title}`)
    }
    deleted++
  }
  console.log(`\n✅ Deleted ${deleted} posts`)
  console.log('\n🚀 Now run the migration with corrected format:')
  console.log(' pnpm tsx scripts/migration/migrate.ts --collection posts --source <your-csv-file>')
}
main().catch(console.error)

View File

@@ -0,0 +1,90 @@
#!/usr/bin/env tsx
/**
* Fix Post Content Structure
*
* Converts migrated posts from direct Lexical format to Payload's {root: ...} format
*/
import { config as dotenvConfig } from 'dotenv'
dotenvConfig({ path: '.env' })
import { getPayload } from 'payload'
import config from '../../src/payload.config'
/**
 * Normalise every post's `content` field into the { "root": { ... } }
 * shape that Payload's richText field expects, serialised as a JSON string.
 *
 * Three cases per post:
 *   - object content: wrap in { root: ... } unless it already has `.root`
 *   - string content: parse it, then wrap unless the parsed value has `.root`
 *   - anything else (null/undefined): counted as skipped
 *
 * The fixed limit of 100 assumes the collection is small — TODO confirm.
 */
async function main() {
  const payload = await getPayload({ config })
  console.log('🔧 Fixing post content structure...\n')
  const posts = await payload.find({
    collection: 'posts',
    limit: 100,
    depth: 0,
  })
  console.log(`Found ${posts.totalDocs} posts to check\n`)
  let fixed = 0
  let skipped = 0
  for (const post of posts.docs) {
    const id = post.id
    // Truncated title used only for log output.
    const title = post.title?.substring(0, 40)
    // Check if content is an object (needs fixing)
    if (post.content && typeof post.content === 'object') {
      // Check if it's already in correct format { root: {...} }
      if (post.content.root && typeof post.content.root === 'object') {
        console.log(`⏭️ Skipping (already correct): ${title}`)
        skipped++
        continue
      }
      // Fix: wrap in { root: ... } structure
      const fixedContent = JSON.stringify({ root: post.content })
      try {
        await payload.update({
          collection: 'posts',
          id,
          data: {
            content: fixedContent,
          },
        })
        console.log(`✓ Fixed: ${title}`)
        fixed++
      } catch (error) {
        // Failed updates are logged but do not abort the run; note that
        // such posts are counted in neither `fixed` nor `skipped`.
        console.error(`✗ Failed to fix "${title}":`, error)
      }
    } else if (post.content && typeof post.content === 'string') {
      // String content - check if it needs fixing
      try {
        const parsed = JSON.parse(post.content)
        if (parsed.root) {
          console.log(`⏭️ Skipping (already correct): ${title}`)
          skipped++
        } else {
          // Need to wrap in { root: ... }
          const fixedContent = JSON.stringify({ root: parsed })
          await payload.update({
            collection: 'posts',
            id,
            data: { content: fixedContent },
          })
          console.log(`✓ Fixed: ${title}`)
          fixed++
        }
      } catch (e) {
        // Unparseable strings are left untouched.
        console.log(`⏭️ Skipping (invalid JSON): ${title}`)
        skipped++
      }
    } else {
      skipped++
    }
  }
  console.log(`\n✅ Fixed ${fixed} posts`)
  console.log(`⏭️ Skipped ${skipped} posts`)
}
main().catch(console.error)

View File

@@ -0,0 +1,62 @@
#!/usr/bin/env tsx
/**
* Fix Post Content Format
*
* Converts post content from object to JSON string format
* for Payload CMS richText field compatibility
*/
import { config as dotenvConfig } from 'dotenv'
dotenvConfig({ path: '.env' })
import { getPayload } from 'payload'
import config from '../../src/payload.config'
/**
 * Debug script: convert object-valued post content into a JSON string,
 * for compatibility with Payload's richText field. Posts whose content is
 * already a string (or empty) are skipped.
 */
async function main() {
  const payload = await getPayload({ config })
  console.log('🔧 Fixing post content format...')
  const { docs, totalDocs } = await payload.find({
    collection: 'posts',
    limit: 100,
    depth: 0,
  })
  console.log(`Found ${totalDocs} posts to check`)
  let fixed = 0
  let skipped = 0
  for (const doc of docs) {
    // Only object-valued content needs converting.
    if (!doc.content || typeof doc.content !== 'object') {
      skipped++
      continue
    }
    const title = doc.content && doc.title?.substring(0, 40) + '...'
    try {
      await payload.update({
        collection: 'posts',
        id: doc.id,
        data: {
          content: JSON.stringify(doc.content),
        },
      })
      console.log(`✓ Fixed: ${title}`)
      fixed++
    } catch (error) {
      console.error(`✗ Failed to fix ${title}:`, error)
    }
  }
  console.log(`\n✅ Fixed ${fixed} posts`)
  console.log(`⏭️ Skipped ${skipped} posts (already in correct format)`)
}
main().catch(console.error)

View File

@@ -0,0 +1,349 @@
/**
* HTML Parser Module
* Story 1.3: Content Migration Script
*
* Parses HTML files from Webflow to extract structured data
* Used when JSON export is not available
*/
import type { WebflowExportData } from './types'
import { toSlug, cleanHTML, htmlToPlainText } from './utils'
import { load as cheerioLoad, CheerioAPI } from 'cheerio'
// ============================================================
// MAIN PARSER FUNCTION
// ============================================================
/**
 * Parse exported Webflow HTML into structured collection data.
 *
 * @param html      Raw HTML document text.
 * @param sourceUrl Currently unused; accepted for interface stability.
 */
export function parseWebflowHTML(html: string, sourceUrl?: string): WebflowExportData {
  const $doc = cheerioLoad(html)
  const posts = extractPosts($doc)
  const categories = extractCategories($doc)
  const portfolio = extractPortfolio($doc)
  return { posts, categories, portfolio }
}
/**
 * Read an HTML file from disk and parse it with parseWebflowHTML.
 */
export async function parseHTMLFile(filePath: string): Promise<WebflowExportData> {
  // Lazy-load fs so importing this module stays side-effect free.
  const fs = await import('fs/promises')
  const markup = await fs.readFile(filePath, 'utf-8')
  return parseWebflowHTML(markup)
}
// ============================================================
// POST EXTRACTION
// ============================================================
/**
 * Extract blog posts from HTML.
 *
 * Generic extractor: tries a list of common Webflow selectors in order and
 * stops at the first selector that yields at least one post. For each
 * matched element, every field falls back through a chain of candidate
 * selectors/attributes; items without a discoverable title are skipped.
 * Customize the selectors to the actual Webflow HTML structure as needed.
 */
function extractPosts($: CheerioAPI): Array<{
  title: string
  slug: string
  content: string
  publishedDate: string
  postCategory?: string
  featuredImage?: string
  seoTitle?: string
  seoDescription?: string
}> {
  const posts: any[] = []
  // Common Webflow blog post selectors, most specific first.
  const postSelectors = [
    '.w-dyn-item', // Webflow collection item
    '.blog-post',
    '.post-item',
    'article',
  ]
  for (const selector of postSelectors) {
    const items = $(selector)
    if (items.length > 0) {
      items.each((_index, element) => {
        const $item = $(element)
        // Extract title; a titleless item is not a post, so bail out.
        const title =
          $item.find('h1, h2, h3, .post-title, .blog-title').first().text().trim() ||
          $item.find('[data-field="title"]').text().trim()
        if (!title) return
        // Extract slug: last path segment of the first link, otherwise
        // slugify the title.
        const link = $item.find('a').first().attr('href') || ''
        const slug = link
          ? link.split('/').filter(Boolean).pop()
          : toSlug(title)
        // Extract content (sanitised via cleanHTML); empty when no
        // content container is found.
        const contentEl = $item.find('.post-content, .blog-content, .content').first()
        const content = contentEl.length ? cleanHTML(contentEl.html() || '') : ''
        // Extract date; falls back to "now" when no date markup exists.
        const dateStr =
          $item.find('.post-date, .blog-date, .date, time').first().text().trim() ||
          $item.find('time').first().attr('datetime') ||
          new Date().toISOString()
        // Extract category
        const category =
          $item.find('.post-category, .blog-category, .category').first().text().trim() ||
          $item.find('[data-field="category"]').text().trim()
        // Extract image (first <img> inside the item).
        const image =
          $item.find('img').first().attr('src') ||
          $item.find('[data-field="featured-image"]').attr('src') ||
          undefined
        // Extract SEO meta (Open Graph tags, then data-field attributes).
        const seoTitle =
          $item.find('meta[property="og:title"]').attr('content') ||
          $item.find('[data-field="seo-title"]').attr('content') ||
          undefined
        const seoDescription =
          $item.find('meta[property="og:description"]').attr('content') ||
          $item.find('[data-field="seo-description"]').attr('content') ||
          undefined
        posts.push({
          title,
          slug: slug || toSlug(title),
          content,
          publishedDate: dateStr,
          postCategory: category || undefined,
          featuredImage: image,
          seoTitle,
          seoDescription,
        })
      })
      // If we found posts, break — later selectors are not tried.
      if (posts.length > 0) {
        break
      }
    }
  }
  return posts
}
// ============================================================
// CATEGORY EXTRACTION
// ============================================================
/**
 * Extract categories from HTML.
 *
 * Scans a list of common category selectors, de-duplicating by name, and
 * tries to read a colour from each element's inline style. When the markup
 * yields no categories at all, a hard-coded list of known categories is
 * returned instead.
 *
 * NOTE(review): the fallback slugs here ('google-workshop', …) differ from
 * the slugs the CSV parser maps the same names to ('google-xiao-xue-tang',
 * …) — confirm which set is canonical before relying on both code paths.
 */
function extractCategories($: CheerioAPI): Array<{
  name: string
  slug: string
  colorHex?: string
}> {
  const categories: any[] = []
  // Common category selectors; unlike extractPosts, ALL selectors are
  // scanned (the name set prevents duplicates across selectors).
  const categorySelectors = [
    '.category-link',
    '.post-category',
    '.filter-category',
    '[data-field="category"]',
  ]
  const uniqueCategories = new Set<string>()
  for (const selector of categorySelectors) {
    const items = $(selector)
    if (items.length > 0) {
      items.each((_index, element) => {
        const $item = $(element)
        const name = $item.text().trim() || $item.attr('data-category') || ''
        if (name && !uniqueCategories.has(name)) {
          uniqueCategories.add(name)
          // Try to extract color from style attribute (3- or 6-digit hex).
          const style = $item.attr('style') || ''
          const colorMatch = style.match(/color:\s*#?([a-f0-9]{6}|[a-f0-9]{3})/i)
          const colorHex = colorMatch ? `#${colorMatch[1]}` : undefined
          categories.push({
            name,
            slug: toSlug(name),
            colorHex,
          })
        }
      })
    }
  }
  // Known categories from the story
  const knownCategories = [
    { name: 'Google小學堂', slug: 'google-workshop' },
    { name: 'Meta小學堂', slug: 'meta-workshop' },
    { name: '行銷時事最前線', slug: 'marketing-news' },
    { name: '恩群數位最新公告', slug: 'enchun-announcements' },
  ]
  // Merge known categories if no categories found
  if (categories.length === 0) {
    return knownCategories.map((cat) => ({
      ...cat,
      colorHex: '#0066cc', // Default blue color
    }))
  }
  return categories
}
// ============================================================
// PORTFOLIO EXTRACTION
// ============================================================
/**
 * Extract portfolio items from HTML.
 *
 * Tries a list of common portfolio selectors and stops at the first one
 * that yields items. Each field falls back through candidate selectors;
 * items without a name are skipped. The website type is derived from a
 * recognised class name on the item element, defaulting to 'other'.
 */
function extractPortfolio($: CheerioAPI): Array<{
  name: string
  slug: string
  websiteLink: string
  previewImage: string
  description: string
  websiteType: 'corporate' | 'ecommerce' | 'landing' | 'brand' | 'other'
  tags: string
}> {
  const portfolio: any[] = []
  // Common portfolio selectors
  const portfolioSelectors = [
    '.portfolio-item',
    '.work-item',
    '.project-item',
    '.case-study',
  ]
  for (const selector of portfolioSelectors) {
    const items = $(selector)
    if (items.length > 0) {
      items.each((_index, element) => {
        const $item = $(element)
        // Extract title/name; a nameless item is not a portfolio entry.
        const name =
          $item.find('h2, h3, h4, .portfolio-title, .project-title').first().text().trim() ||
          $item.find('[data-field="name"]').text().trim()
        if (!name) return
        // Extract link
        const link =
          $item.find('a').first().attr('href') ||
          $item.find('[data-field="website-link"]').attr('href') ||
          ''
        // Extract image
        const image =
          $item.find('img').first().attr('src') ||
          $item.find('[data-field="preview-image"]').attr('src') ||
          ''
        // Extract description
        const description =
          $item.find('.portfolio-description, .project-description, .description')
            .first()
            .text()
            .trim() || ''
        // Extract tags
        const tags = $item.find('.tag, .tags').first().text().trim() || ''
        // Determine website type from the element's class attribute.
        // BUG FIX: cheerio elements are domhandler nodes and have no DOM
        // `classList`; the original `Array.from(element.classList)` threw a
        // TypeError at runtime. Read the `class` attribute via cheerio.
        const classNames = ($item.attr('class') || '').split(/\s+/)
        const typeClass = classNames.find((c) =>
          ['corporate', 'ecommerce', 'landing', 'brand', 'other'].includes(c),
        )
        const websiteType = (typeClass as any) || 'other'
        portfolio.push({
          name,
          slug: toSlug(name),
          websiteLink: link,
          previewImage: image,
          description,
          websiteType,
          tags,
        })
      })
      // Stop at the first selector that produced items.
      if (portfolio.length > 0) {
        break
      }
    }
  }
  return portfolio
}
// ============================================================
// URL EXTRACTION
// ============================================================
/**
 * Collect every unique image URL referenced by <img> tags in the HTML,
 * including lazy-load (data-src) and responsive (srcset) sources.
 */
export function extractImageUrls(html: string): string[] {
  const $ = cheerioLoad(html)
  const found = new Set<string>()
  $('img').each((_i, el) => {
    const $img = $(el)
    for (const attrName of ['src', 'data-src'] as const) {
      const value = $img.attr(attrName)
      if (value) found.add(value)
    }
    // srcset entries look like "url width, url width, …"; keep the URLs.
    const srcset = $img.attr('srcset')
    if (srcset) {
      for (const candidate of srcset.split(',')) {
        const url = candidate.trim().split(' ')[0]
        if (url) found.add(url)
      }
    }
  })
  return Array.from(found)
}
/**
 * Gather every unique media URL referenced by the parsed export data:
 * post featured images plus portfolio preview images.
 */
export function extractMediaUrls(data: WebflowExportData): string[] {
  const urls = new Set<string>()
  // Featured images from posts.
  for (const post of data.posts ?? []) {
    if (post.featuredImage) urls.add(post.featuredImage)
  }
  // Preview images from portfolio items.
  for (const item of data.portfolio ?? []) {
    if (item.previewImage) urls.add(item.previewImage)
  }
  return Array.from(urls)
}

View File

@@ -0,0 +1,52 @@
#!/usr/bin/env tsx
import { config as dotenvConfig } from 'dotenv'
import { resolve, dirname } from 'path'
import { fileURLToPath } from 'url'
const __filename = fileURLToPath(import.meta.url)
const __dirname = dirname(__filename)
const envPath = resolve(__dirname, '../../.env')
dotenvConfig({ path: envPath })
import { parseWebflowCSV } from './csvParser'
import { htmlToLexical } from './lexicalConverter'
/**
 * Debug script: validate the Lexical tree produced for the failing post —
 * every top-level child must have a type, a version, and a children array —
 * then dump the first three children for inspection.
 */
async function main() {
  const data = await parseWebflowCSV('/Users/pukpuk/Dev/website-enchun-mgr/恩群數位行銷 - 行銷放大鏡集.csv')
  const failPost = data.posts.find((p: any) => p.title.includes('一點都不難'))
  // Guard: .find() returns undefined when no title matches; the original
  // crashed with a TypeError on failPost.content below.
  if (!failPost) {
    console.error('Post not found: 一點都不難')
    return
  }
  const lexical = htmlToLexical(failPost.content || '')
  const parsed = JSON.parse(lexical)
  console.log('Root type:', parsed.root?.type)
  console.log('Root version:', parsed.root?.version)
  console.log('Children count:', parsed.root?.children?.length)
  // Check each child for the fields Payload's richText validation requires.
  let issueCount = 0
  parsed.root?.children?.forEach((child: any, i: number) => {
    if (!child.type) {
      console.log('Child', i, 'missing type')
      issueCount++
    }
    if (!child.version) {
      console.log('Child', i, 'missing version')
      issueCount++
    }
    if (!child.children || !Array.isArray(child.children)) {
      console.log('Child', i, 'missing or invalid children array')
      issueCount++
    }
  })
  console.log('Issues found:', issueCount)
  // Show first few children
  console.log('\nFirst 3 children:')
  parsed.root?.children?.slice(0, 3).forEach((child: any, i: number) => {
    console.log(`[${i}]`, JSON.stringify(child, null, 2))
  })
}
main().catch(console.error)

View File

@@ -0,0 +1,29 @@
#!/usr/bin/env tsx
import { config as dotenvConfig } from 'dotenv'
dotenvConfig({ path: '.env' })
import { parseWebflowCSV } from './csvParser'
import { transformPosts } from './transformers'
/**
 * Debug script: run one failing post through transformPosts and dump the
 * resulting Payload document fields, confirming the content carries the
 * { root: … } wrapper Payload expects.
 */
async function main() {
  const data = await parseWebflowCSV('/Users/pukpuk/Dev/website-enchun-mgr/恩群數位行銷 - 行銷放大鏡集.csv')
  const failPost = data.posts.find((p: any) => p.title.includes('一點都不難'))
  // Guard: .find() returns undefined when no title matches; the original
  // crashed inside transformPosts when passed [undefined].
  if (!failPost) {
    console.error('Post not found: 一點都不難')
    return
  }
  const transformed = transformPosts([failPost])[0]
  console.log('=== TRANSFORMED POST DATA ===')
  console.log('title:', transformed.title)
  console.log('slug:', transformed.slug)
  console.log('publishedAt:', transformed.publishedAt)
  console.log('status:', transformed.status)
  console.log('excerpt:', transformed.excerpt?.substring(0, 100))
  console.log('content type:', typeof transformed.content)
  console.log('content length:', transformed.content?.length)
  // Parse the serialized content and confirm the { root: … } wrapper.
  const contentParsed = JSON.parse(transformed.content)
  console.log('content.root exists:', contentParsed.root !== undefined)
  console.log('content.root.type:', contentParsed.root?.type)
}
main().catch(console.error)

View File

@@ -0,0 +1,572 @@
/**
* HTML to Lexical JSON Converter
* Story 1.3: Content Migration Script
*
* Converts HTML content to Payload CMS Lexical editor format
*/
import { parse } from 'html-parse-stringify'
// ============================================================
// LEXICAL JSON TYPES
// ============================================================
/** Generic serialized Lexical node (catch-all; concrete shapes below). */
interface LexicalNode {
  type: string
  version: number
  [key: string]: any
}
/** Serialized Lexical text node. */
interface LexicalTextContent {
  type: 'text'
  version: 1
  // Lexical serializes `detail` as a numeric bitmask (0 = plain text).
  // createTextNode() assigns `detail: 0`, so the previous object type
  // ({ 0: any; 1: any }) did not type-check against actual usage.
  detail?: number
  format?: number // text-format bitmask: 1 = bold, 2 = italic, 4 = underline
  mode?: string
  style?: string
  text: string
}
/** Serialized Lexical element node (paragraph, heading, list, quote, link). */
interface LexicalElementNode {
  type: 'element' | 'heading' | 'link' | 'list' | 'listitem' | 'quote' | 'paragraph'
  version: 1
  children: LexicalContent[]
  direction?: 'ltr' | 'rtl' | null
  format?: '' | 'left' | 'start' | 'center' | 'right' | 'end' | 'justify'
  indent?: number
  tag?: string // heading tag, e.g. 'h2'
  listType?: 'bullet' | 'number'
  rel?: null | string
  target?: null | string
  title?: null | string
  url?: string
}
/** Serialized Lexical linebreak node. */
interface LexicalLinebreakNode {
  type: 'linebreak'
  version: 1
}
/** Root of a serialized Lexical document. */
interface LexicalRoot {
  type: 'root'
  version: 1
  children: LexicalElementNode[]
  direction: 'ltr' | 'rtl' | null
}
/** Any node that can appear inside an element's children array. */
type LexicalContent = LexicalTextContent | LexicalElementNode | LexicalLinebreakNode
// ============================================================
// HTML TO LEXICAL CONVERTER
// ============================================================
/**
 * Convert an HTML string to serialized Lexical JSON for Payload's richText
 * field.
 *
 * IMPORTANT: Payload expects the content wrapped as { "root": {...} }; that
 * is the shape this function stringifies. Empty text nodes are stripped
 * first because Payload's Lexical validator rejects them. On parse failure
 * the cleaned HTML is emitted as a single plain-text paragraph.
 */
export function htmlToLexical(html: string): string {
  if (!html || typeof html !== 'string') {
    return createEmptyLexical()
  }
  // Strip scripts, styles and Webflow attributes before parsing.
  const cleanedHtml = cleanHtml(html)
  try {
    const converted = cleanEmptyTextNodes(convertNodes(parse(cleanedHtml)))
    const root: LexicalRoot = {
      type: 'root',
      version: 1,
      // Never emit an empty root — Payload requires at least one child.
      children: converted.length > 0 ? converted : [createEmptyParagraph()],
      direction: null,
    }
    // Wrap in { "root": ... } — the format Payload stores Lexical content in.
    return JSON.stringify({ root })
  } catch (error) {
    console.warn('Failed to parse HTML, using fallback:', error)
    return createTextLexical(cleanedHtml)
  }
}
/**
* Convert HTML string to Lexical object (for direct use with Payload local API)
* Returns { root: LexicalRoot } format for Payload richText field
*/
export function htmlToLexicalObject(html: string): { root: LexicalRoot } {
if (!html || typeof html !== 'string') {
return JSON.parse(createEmptyLexical())
}
// Clean the HTML first
const cleanedHtml = cleanHtml(html)
try {
const ast = parse(cleanedHtml)
const children = convertNodes(ast)
return {
root: {
type: 'root',
version: 1,
children: children.length > 0 ? children : [createEmptyParagraph()],
direction: null,
},
}
} catch (error) {
console.warn('Failed to parse HTML, using fallback:', error)
return JSON.parse(createTextLexical(cleanedHtml))
}
}
/**
* Create empty Lexical JSON structure
*/
function createEmptyLexical(): string {
return JSON.stringify({
root: {
type: 'root',
version: 1,
children: [createEmptyParagraph()],
direction: null,
},
})
}
/**
 * Fallback serializer: wrap raw text in a single Lexical paragraph
 * (used when HTML parsing fails).
 */
function createTextLexical(text: string): string {
  const paragraph = {
    type: 'paragraph',
    version: 1,
    children: [createTextNode(text)],
  }
  const document = {
    root: {
      type: 'root',
      version: 1,
      children: [paragraph],
      direction: null,
    },
  }
  return JSON.stringify(document)
}
/**
 * Build a paragraph containing a single empty text node — the minimal
 * non-empty child Payload accepts under a Lexical root.
 */
function createEmptyParagraph(): LexicalElementNode {
  const placeholder = createTextNode('')
  return { type: 'paragraph', version: 1, children: [placeholder] }
}
/**
 * Recursively remove empty text nodes ("") from a Lexical tree at every
 * depth — Payload's Lexical validator rejects them. The previous version
 * only cleaned two levels deep, so deeper empty text nodes survived.
 *
 * Elements whose children were all removed get a single empty-text
 * placeholder so they are not left with an empty children array.
 * NOTE(review): the placeholder mirrors the original top-level behavior —
 * confirm Payload accepts a lone empty text child.
 *
 * Stray top-level linebreak nodes are dropped as well.
 */
function cleanEmptyTextNodes(nodes: LexicalElementNode[]): LexicalElementNode[] {
  const cleanNode = (node: any): any => {
    // Leaf nodes (text, linebreak) pass through untouched.
    if (!node.children || !Array.isArray(node.children)) {
      return node
    }
    const kept = node.children
      .filter((child: any) => !(child.type === 'text' && child.text === ''))
      .map(cleanNode)
    return {
      ...node,
      // Backfill a placeholder when every child was removed.
      children: kept.length > 0 ? kept : [createTextNode('')],
    }
  }
  return nodes
    .map((node) => cleanNode(node) as LexicalElementNode)
    // Remove nodes that are invalid at the top level.
    .filter((node) => node.type !== 'linebreak')
}
/**
 * Strip unwanted markup from Webflow HTML before conversion:
 * script/style blocks (with contents), data-* and class attributes,
 * and empty p/div tags. Leading/trailing whitespace is trimmed.
 */
function cleanHtml(html: string): string {
  const scrubbers: ReadonlyArray<[RegExp, string]> = [
    // Remove script and style tags together with their contents
    [/<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi, ''],
    [/<style\b[^<]*(?:(?!<\/style>)<[^<]*)*<\/style>/gi, ''],
    // Remove Webflow-specific attributes
    [/\sdata-[a-z-]+="[^"]*"/gi, ''],
    [/\sclass="[^"]*"/gi, ''],
    // Drop empty paragraph and div tags
    [/<p>\s*<\/p>/gi, ''],
    [/<div>\s*<\/div>/gi, ''],
  ]
  let cleaned = html
  for (const [pattern, replacement] of scrubbers) {
    cleaned = cleaned.replace(pattern, replacement)
  }
  return cleaned.trim()
}
/**
 * Convert block-level HTML AST nodes (html-parse-stringify output) into
 * Lexical element nodes.
 *
 * Handles headings, paragraphs, lists, blockquotes, divs (as paragraphs),
 * br/hr, and images (as italic placeholder paragraphs — see
 * createImageNode). Unknown tags are dropped. Always returns at least one
 * paragraph so the Lexical root is never empty.
 */
function convertNodes(nodes: any[]): LexicalElementNode[] {
  const result: LexicalElementNode[] = []
  let currentList: LexicalElementNode | null = null
  let listItems: LexicalElementNode[] = []
  for (const node of nodes) {
    // Bare text between elements becomes its own paragraph.
    if (node.type === 'text') {
      // html-parse-stringify stores text under `content`; accept `value` too
      // for consistency with convertInlineNodes/extractText. (Reading only
      // `node.value` silently dropped all top-level text.)
      const text = String(node.content ?? node.value ?? '').trim()
      if (text) {
        result.push({
          type: 'paragraph',
          version: 1,
          children: [createTextNode(text)],
        })
      }
      continue
    }
    if (!node.name) continue
    const tag = node.name.toLowerCase()
    // Handle headings
    if (['h1', 'h2', 'h3', 'h4', 'h5', 'h6'].includes(tag)) {
      flushList(result, currentList, listItems)
      currentList = null
      listItems = []
      result.push(createHeading(tag, node.children || []))
      continue
    }
    // Handle paragraphs (empty ones are dropped)
    if (tag === 'p') {
      flushList(result, currentList, listItems)
      currentList = null
      listItems = []
      const content = convertInlineNodes(node.children || [])
      if (content.length > 0) {
        result.push({
          type: 'paragraph',
          version: 1,
          children: content,
        })
      }
      continue
    }
    // Handle lists: collect items now; flushList pushes the list when it ends.
    if (tag === 'ul' || tag === 'ol') {
      flushList(result, currentList, listItems)
      currentList = {
        type: 'list',
        version: 1,
        listType: tag === 'ol' ? 'number' : 'bullet',
        children: [],
      }
      listItems = convertListItems(node.children || [])
      continue
    }
    // Handle blockquotes
    if (tag === 'blockquote') {
      flushList(result, currentList, listItems)
      currentList = null
      listItems = []
      const content = convertInlineNodes(node.children || [])
      result.push({
        type: 'quote',
        version: 1,
        children: content,
      })
      continue
    }
    // Handle divs (treated as paragraphs; empty ones dropped)
    if (tag === 'div') {
      flushList(result, currentList, listItems)
      currentList = null
      listItems = []
      const content = convertInlineNodes(node.children || [])
      if (content.length > 0) {
        result.push({
          type: 'paragraph',
          version: 1,
          children: content,
        })
      }
      continue
    }
    // Handle line breaks and horizontal rules.
    // NOTE(review): br/hr do not flush a pending list — confirm intended.
    if (tag === 'br') {
      result.push({
        type: 'paragraph',
        version: 1,
        children: [{ type: 'linebreak', version: 1 } as any],
      })
      continue
    }
    if (tag === 'hr') {
      result.push({
        type: 'paragraph',
        version: 1,
        children: [createTextNode('---')],
      })
      continue
    }
    // Handle images (placeholder paragraphs)
    if (tag === 'img') {
      flushList(result, currentList, listItems)
      currentList = null
      listItems = []
      // html-parse-stringify exposes attributes under `attrs`; accept the
      // `attributes` spelling too (matches convertInlineNodes' img branch).
      const src = node.attrs?.src || node.attributes?.src || ''
      const alt = node.attrs?.alt || node.attributes?.alt || ''
      result.push(createImageNode(src, alt))
      continue
    }
  }
  // Flush any remaining list
  flushList(result, currentList, listItems)
  return result.length > 0 ? result : [createEmptyParagraph()]
}
/**
 * Append a pending list (with its collected items) to the result.
 * No-op when there is no open list or it has no items.
 */
function flushList(
  result: LexicalElementNode[],
  list: LexicalElementNode | null,
  items: LexicalElementNode[],
): void {
  if (list === null || items.length === 0) {
    return
  }
  list.children = items
  result.push(list)
}
/**
 * Convert the <li> children of a list element to Lexical listitem nodes.
 * Non-li children are ignored.
 */
function convertListItems(items: any[]): LexicalElementNode[] {
  const listItems: LexicalElementNode[] = []
  for (const item of items) {
    if (item.name?.toLowerCase() !== 'li') continue
    listItems.push({
      type: 'listitem',
      version: 1,
      children: convertInlineNodes(item.children || []),
    })
  }
  return listItems
}
/**
 * Build a Lexical text node with every property the serialized format
 * carries (detail/mode/style defaults match plain text).
 *
 * @param format optional format bitmask (1 bold, 2 italic, 4 underline);
 *   defaults to 0 (plain).
 */
function createTextNode(text: string, format?: number): LexicalTextContent {
  const node: LexicalTextContent = {
    type: 'text',
    version: 1,
    text,
    detail: 0,
    mode: 'normal',
    style: '',
    format: format ?? 0,
  }
  return node
}
/**
 * Convert inline HTML AST nodes (text, links, bold/italic/underline, spans,
 * inline code, images) into Lexical inline content.
 *
 * Formatting uses Lexical's bitmask: 1 = bold, 2 = italic, 4 = underline.
 * Nested markup inside these tags is flattened to plain styled text.
 * Always returns at least one text node so parents never end up with an
 * empty children array.
 */
function convertInlineNodes(nodes: any[]): LexicalContent[] {
  const result: LexicalContent[] = []
  for (const node of nodes) {
    // Handle text nodes (html-parse-stringify uses `content`, others `value`)
    if (node.type === 'text') {
      const text = (node.value || node.content || '') as string
      if (text) {
        result.push(createTextNode(text))
      }
      continue
    }
    // Skip anything without an element name (e.g. comment nodes). Checking
    // `name` alone avoids calling toLowerCase() on undefined for nodes that
    // carry a `type` but no `name`.
    if (!node.name) continue
    const tag = node.name.toLowerCase()
    // Handle links
    // NOTE: Payload's Lexical link validation is very strict. For now, convert links to text
    // TODO: Implement proper link format after investigating Payload's link node requirements
    if (tag === 'a') {
      // Convert links to text with URL in parentheses
      const text = extractText(node.children || [])
      const href = node.attrs?.href || node.attributes?.href || ''
      if (text) {
        // Include URL as text for now
        const linkText = href && href !== '#' ? `${text} (${href})` : text
        result.push(createTextNode(linkText))
      }
      continue
    }
    // Handle bold (strong, b)
    if (tag === 'strong' || tag === 'b') {
      const text = extractText(node.children || [])
      result.push(createTextNode(text, 1)) // Bold format
      continue
    }
    // Handle italic (em, i)
    if (tag === 'em' || tag === 'i') {
      const text = extractText(node.children || [])
      result.push(createTextNode(text, 2)) // Italic format
      continue
    }
    // Handle underline (u)
    if (tag === 'u') {
      const text = extractText(node.children || [])
      result.push(createTextNode(text, 4)) // Underline format
      continue
    }
    // Handle images inline (placeholder — see createImageNode)
    if (tag === 'img') {
      const src = node.attrs?.src || node.attributes?.src || ''
      const alt = node.attrs?.alt || node.attributes?.alt || ''
      result.push(createImageNode(src, alt))
      continue
    }
    // Handle spans (treat as plain text)
    if (tag === 'span') {
      const text = extractText(node.children || [])
      if (text) {
        result.push(createTextNode(text))
      }
      continue
    }
    // Handle inline code (rendered via monospace inline style)
    if (tag === 'code') {
      const text = extractText(node.children || [])
      result.push({
        ...createTextNode(text),
        style: 'font-family: monospace;',
      })
      continue
    }
    // Recursively unwrap any other inline element
    const children = convertInlineNodes(node.children || [])
    result.push(...children)
  }
  return result.length > 0 ? result : [createTextNode('')]
}
/**
 * Build a Lexical heading node for tags h1-h6.
 *
 * @param tag heading tag name, e.g. 'h2'
 * @param children raw AST children to convert to inline content
 */
function createHeading(tag: string, children: any[]): LexicalElementNode {
  const level = Number.parseInt(tag.substring(1), 10)
  const content = convertInlineNodes(children)
  return {
    type: 'heading',
    version: 1,
    tag: `h${level}`,
    // Guarantee at least one child for Payload's validator.
    children: content.length === 0 ? [createTextNode('')] : content,
  }
}
/**
 * Represent an image as an italic "[Image: ...]" placeholder paragraph
 * (actual media embedding is handled elsewhere in the migration).
 */
function createImageNode(src: string, alt: string): LexicalElementNode {
  const label = createTextNode(`[Image: ${alt || src}]`)
  return {
    type: 'paragraph',
    version: 1,
    children: [{ ...label, style: 'font-style: italic;' }],
  }
}
/**
 * Flatten an AST subtree to its concatenated plain text.
 * Accepts both `value` and `content` spellings for text payloads.
 */
function extractText(nodes: any[]): string {
  return nodes
    .map((node) => {
      if (node.type === 'text') {
        return node.value || node.content || ''
      }
      if (node.children) {
        return extractText(node.children)
      }
      return node.content || ''
    })
    .join('')
}
// ============================================================
// UTILITY FUNCTIONS
// ============================================================
/**
 * Check whether a string parses as Lexical JSON.
 *
 * Accepts both the Payload-wrapped shape ({ root: { type: 'root', ... } })
 * that htmlToLexical() emits and the legacy unwrapped root object — the
 * previous check only recognized the unwrapped form, so it rejected this
 * module's own output.
 */
export function isValidLexical(json: string): boolean {
  try {
    const parsed = JSON.parse(json)
    const root = parsed?.root ?? parsed
    return root?.type === 'root' && Array.isArray(root?.children)
  } catch {
    return false
  }
}
/**
 * Convert a batch of HTML strings to serialized Lexical JSON, preserving
 * input order.
 */
export function batchHtmlToLexical(htmlArray: string[]): string[] {
  const converted: string[] = []
  for (const html of htmlArray) {
    converted.push(htmlToLexical(html))
  }
  return converted
}

View File

@@ -0,0 +1,286 @@
/**
* Media Handler Module
* Story 1.3: Content Migration Script
*
* Downloads images from URLs and uploads to Payload CMS
*/
import type { Payload, File } from 'payload'
import type { MediaDownloadResult } from './types'
import { getFilenameFromUrl, getFileExtension } from './utils'
// ============================================================
// DOWNLOAD MEDIA
// ============================================================
/**
 * Download an image over HTTP(S) with retries and exponential backoff.
 *
 * Each attempt sends a browser-like User-Agent (some CDNs reject unknown
 * clients) and aborts after 30 seconds. On total failure the last error
 * message is returned in the result instead of throwing.
 *
 * @param url image URL to fetch
 * @param retries maximum number of attempts (default 3)
 */
export async function downloadImage(
  url: string,
  retries: number = 3,
): Promise<MediaDownloadResult> {
  let lastError: Error | undefined
  for (let attempt = 0; attempt < retries; attempt++) {
    try {
      const response = await fetch(url, {
        method: 'GET',
        headers: {
          'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        },
        signal: AbortSignal.timeout(30000), // 30 second timeout
      })
      if (!response.ok) {
        throw new Error(`HTTP ${response.status}: ${response.statusText}`)
      }
      const body = Buffer.from(await response.arrayBuffer())
      return {
        success: true,
        url,
        buffer: body,
        filename: getFilenameFromUrl(url),
      }
    } catch (error) {
      lastError = error as Error
      // Back off 1s, 2s, 4s, ... between attempts; no wait after the last.
      const isFinalAttempt = attempt === retries - 1
      if (!isFinalAttempt) {
        const backoffMs = 2 ** attempt * 1000
        await new Promise((resolve) => setTimeout(resolve, backoffMs))
      }
    }
  }
  return {
    success: false,
    url,
    error: lastError?.message || 'Unknown error',
  }
}
/**
 * Download many images, `batchSize` at a time; each batch is fetched in
 * parallel, batches run sequentially. Result order matches input order.
 */
export async function downloadImages(
  urls: string[],
  batchSize: number = 5,
): Promise<MediaDownloadResult[]> {
  const results: MediaDownloadResult[] = []
  let cursor = 0
  while (cursor < urls.length) {
    const batch = urls.slice(cursor, cursor + batchSize)
    const batchResults = await Promise.all(batch.map((url) => downloadImage(url)))
    results.push(...batchResults)
    cursor += batchSize
  }
  return results
}
// ============================================================
// UPLOAD TO PAYLOAD CMS
// ============================================================
/**
 * Upload a single downloaded image to the Payload `media` collection.
 * The filename doubles as the alt text. Returns { success, id } or
 * { success: false, error } — never throws.
 */
export async function uploadToMedia(
  payload: Payload,
  downloadResult: MediaDownloadResult,
): Promise<{ success: boolean; id?: string; error?: string }> {
  if (!downloadResult.success || !downloadResult.buffer || !downloadResult.filename) {
    return {
      success: false,
      error: downloadResult.error || 'Invalid download result',
    }
  }
  // Proper MIME types per extension. Interpolating `image/${ext}` produced
  // invalid types for .jpg (image/jpeg, not image/jpg) and .svg
  // (image/svg+xml), which strict upload validation can reject.
  const MIME_BY_EXTENSION: Record<string, string> = {
    jpg: 'image/jpeg',
    jpeg: 'image/jpeg',
    png: 'image/png',
    gif: 'image/gif',
    webp: 'image/webp',
    svg: 'image/svg+xml',
  }
  try {
    const extension = getFileExtension(downloadResult.filename).toLowerCase()
    const file: File = {
      name: downloadResult.filename,
      data: downloadResult.buffer,
      // Fall back to the naive mapping for unknown extensions.
      mimetype: MIME_BY_EXTENSION[extension] ?? `image/${extension}`,
      size: downloadResult.buffer.length,
    }
    const result = await payload.create({
      collection: 'media',
      file,
      data: {
        alt: downloadResult.filename,
      },
    })
    return {
      success: true,
      id: result.id,
    }
  } catch (error) {
    return {
      success: false,
      error: error instanceof Error ? error.message : 'Unknown error',
    }
  }
}
/**
 * Look up an existing media document by exact filename.
 *
 * @returns the document ID, or null when absent or on any query error.
 */
export async function findMediaByFilename(
  payload: Payload,
  filename: string,
): Promise<string | null> {
  try {
    const { docs } = await payload.find({
      collection: 'media',
      where: {
        filename: { equals: filename },
      },
      limit: 1,
      depth: 0,
    })
    return docs && docs.length > 0 ? docs[0].id : null
  } catch {
    // Lookup failures are treated the same as "not found".
    return null
  }
}
// ============================================================
// BATCH PROCESSING
// ============================================================
/**
 * Download every valid image URL and ensure a matching media document exists
 * in Payload, deduplicating by filename.
 *
 * URLs are processed in sequential batches of `batchSize` (downloads within
 * a batch run in parallel; uploads run one at a time). Failed downloads or
 * uploads are silently omitted from the returned map.
 *
 * @param urls candidate image URLs (empty/non-image URLs are filtered out)
 * @param options.batchSize parallel downloads per batch (default 5)
 * @param options.retries download retry count passed to downloadImage (default 3)
 * @param options.onProgress called after each processed download with
 *   (mapped-so-far, total). NOTE: `current` counts successful mappings, so it
 *   can trail the number of URLs actually processed when some fail.
 * @returns map of original URL -> Payload media document ID
 */
export async function processMediaUrls(
  payload: Payload,
  urls: string[],
  options: {
    batchSize?: number
    retries?: number
    onProgress?: (current: number, total: number) => void
  } = {},
): Promise<Map<string, string>> {
  const { batchSize = 5, onProgress } = options
  const urlToIdMap = new Map<string, string>()
  // Filter out empty URLs
  const validUrls = urls.filter((url) => url && isValidImageUrl(url))
  for (let i = 0; i < validUrls.length; i += batchSize) {
    const batch = validUrls.slice(i, i + batchSize)
    // Download batch in parallel
    const downloadResults = await Promise.all(
      batch.map((url) => downloadImage(url, options.retries || 3)),
    )
    // Upload each downloaded image (sequentially, to dedupe by filename)
    for (const result of downloadResults) {
      if (result.success && result.buffer && result.filename) {
        // Reuse an existing media document with the same filename if present
        const existingId = await findMediaByFilename(payload, result.filename)
        if (existingId) {
          urlToIdMap.set(result.url, existingId)
        } else {
          // Upload new
          const uploadResult = await uploadToMedia(payload, result)
          if (uploadResult.success && uploadResult.id) {
            urlToIdMap.set(result.url, uploadResult.id)
          }
        }
      }
      // Report progress after every processed download, success or not
      if (onProgress) {
        onProgress(urlToIdMap.size, validUrls.length)
      }
    }
  }
  return urlToIdMap
}
/**
* Process a single media URL with caching
*/
const mediaCache = new Map<string, string>()
export async function getOrCreateMedia(
payload: Payload,
url: string,
): Promise<string | undefined> {
// Check cache first
if (mediaCache.has(url)) {
return mediaCache.get(url)
}
if (!url || !isValidImageUrl(url)) {
return undefined
}
const downloadResult = await downloadImage(url)
if (!downloadResult.success || !downloadResult.buffer || !downloadResult.filename) {
return undefined
}
// Check if already exists
const existingId = await findMediaByFilename(payload, downloadResult.filename)
if (existingId) {
mediaCache.set(url, existingId)
return existingId
}
// Upload new
const uploadResult = await uploadToMedia(payload, downloadResult)
if (uploadResult.success && uploadResult.id) {
mediaCache.set(url, uploadResult.id)
return uploadResult.id
}
return undefined
}
// ============================================================
// UTILITIES
// ============================================================
/**
 * True when the URL parses and its path ends in a known image extension
 * (case-insensitive). Only the pathname is inspected, so query strings
 * are ignored.
 */
function isValidImageUrl(url: string): boolean {
  if (!url) {
    return false
  }
  const imageExtensions = ['.jpg', '.jpeg', '.png', '.gif', '.webp', '.svg']
  try {
    const pathname = new URL(url).pathname.toLowerCase()
    return imageExtensions.some((extension) => pathname.endsWith(extension))
  } catch {
    // Malformed URLs are simply not valid images.
    return false
  }
}
/**
 * Clear the module-level URL -> media-ID cache used by getOrCreateMedia
 * (useful for testing or re-running a migration within the same process).
 */
export function clearMediaCache(): void {
  mediaCache.clear()
}

View File

@@ -0,0 +1,13 @@
#!/bin/bash
# Migration wrapper script - loads .env before running tsx
cd "$(dirname "$0")"
# Load .env by sourcing it with auto-export. Unlike
# `export $(grep -v '^#' .env | xargs)`, this handles quoted values and
# values containing spaces or '=' correctly. Assumes .env lines are valid
# shell assignments (KEY=value), which dotenv-style files are.
if [ -f .env ]; then
  set -a
  # shellcheck disable=SC1091
  . ./.env
  set +a
  echo "✓ .env loaded"
fi
# Run the migration script, forwarding all CLI arguments
tsx migrate.ts "$@"

View File

@@ -0,0 +1,436 @@
#!/usr/bin/env tsx
/**
* Webflow to Payload CMS Migration Script
* Story 1.3: Content Migration Script
*
* Usage:
* pnpm tsx scripts/migration/migrate.ts [options]
*
* Options:
* --dry-run, -n Run without making changes
* --verbose, -v Show detailed logging
* --force, -f Overwrite existing items
* --collection, -c Specific collection (categories|posts|portfolio|all)
* --source, -s Path to export file
* --batch-size Batch size for processing (default: 5)
* --help, -h Show help message
*/
import { config as dotenvConfig } from 'dotenv'
// Load .env before any other imports
dotenvConfig({ path: '.env' })
// Ensure R2_BUCKET_NAME is set (falling back to R2_BUCKET) before
// payload.config is imported below, since config is evaluated at import time.
// NOTE(review): assumes R2_BUCKET holds the same bucket name the config's
// storage setup expects under R2_BUCKET_NAME — confirm.
if (!process.env.R2_BUCKET_NAME && process.env.R2_BUCKET) {
  process.env.R2_BUCKET_NAME = process.env.R2_BUCKET
}
import { getPayload } from 'payload'
import config from '../../src/payload.config'
import { parseCliArgs, Logger, colors } from './utils'
import { createReport, updateReport, saveReport, printReportSummary } from './reporter'
import {
findBySlug,
findBySlugAndDate,
getAllSlugs,
getExistingPostIdentifiers,
} from './deduplicator'
import { transformCategories, transformPosts, transformPortfolios } from './transformers'
import { processMediaUrls, getOrCreateMedia } from './mediaHandler'
import { parseWebflowHTML, parseHTMLFile, extractMediaUrls } from './htmlParser'
import { parseWebflowCSV } from './csvParser'
import type { MigrationConfig, WebflowExportData, PayloadCategory } from './types'
import { readFileSync, existsSync } from 'fs'
import { extname } from 'path'
// ============================================================
// MAIN MIGRATION FUNCTION
// ============================================================
/**
 * Entry point: parse CLI flags, boot Payload, load the Webflow export, and
 * migrate the requested collections — categories first so posts can link to
 * them — then print the report (and save it on live runs).
 */
async function main() {
  // Parse CLI arguments
  const args = process.argv.slice(2)
  const config_options = parseCliArgs(args)
  const logger = new Logger(config_options.verbose)
  const report = createReport(config_options.dryRun)
  logger.header('🚀 Webflow to Payload CMS Migration')
  logger.info(`Mode: ${config_options.dryRun ? colors.yellow + 'DRY RUN' + colors.reset : colors.green + 'LIVE' + colors.reset}`)
  logger.info(`Source: ${config_options.sourcePath}`)
  logger.info(`Collections: ${config_options.collections.join(', ')}`)
  // Initialize Payload
  logger.info('\n📦 Initializing Payload CMS...')
  const payload = await getPayload({ config })
  // Load source data (CSV/JSON/HTML — see loadSourceData)
  logger.info('\n📂 Loading source data...')
  const sourceData = await loadSourceData(config_options.sourcePath, logger)
  if (!sourceData) {
    logger.error('Failed to load source data')
    process.exit(1)
  }
  // Process based on collections
  const collectionsToProcess = determineCollections(config_options.collections)
  // Migration order: Categories first, then Posts/Portfolio.
  // NOTE(review): when run with e.g. `-c posts` alone, categoryMap stays
  // empty, so migrated posts get no category links — confirm intended.
  let categoryMap = new Map<string, string>() // slug -> id
  if (collectionsToProcess.includes('categories')) {
    categoryMap = await migrateCategories(payload, sourceData, config_options, logger, report)
  }
  if (collectionsToProcess.includes('posts')) {
    await migratePosts(payload, sourceData, config_options, logger, report, categoryMap)
  }
  if (collectionsToProcess.includes('portfolio')) {
    await migratePortfolio(payload, sourceData, config_options, logger, report)
  }
  // Generate and save report (reports are only written to disk on live runs)
  printReportSummary(report)
  if (!config_options.dryRun) {
    await saveReport(report, './apps/backend/reports')
  }
}
// ============================================================
// DATA LOADING
// ============================================================
/**
 * Load the Webflow export from disk.
 *
 * Format is chosen by file extension — CSV (Webflow export), JSON, or HTML —
 * with auto-detection (CSV, then JSON, then HTML) for unknown extensions.
 * When the file does not exist, a stub dataset containing only the four
 * known categories is returned so the category structure can still be
 * created without an export file.
 *
 * @returns parsed export data, or null when every parse attempt failed.
 */
async function loadSourceData(
  sourcePath: string,
  logger: Logger,
): Promise<WebflowExportData | null> {
  // Check if file exists; fall back to the hard-coded category stub if not.
  if (!existsSync(sourcePath)) {
    logger.error(`Source file not found: ${sourcePath}`)
    logger.info('\nCreating sample data structure for manual entry...')
    return {
      posts: [],
      categories: [
        { name: 'Google小學堂', slug: 'google-workshop', colorHex: '#4285f4' },
        { name: 'Meta小學堂', slug: 'meta-workshop', colorHex: '#0668e1' },
        { name: '行銷時事最前線', slug: 'marketing-news', colorHex: '#34a853' },
        { name: '恩群數位最新公告', slug: 'enchun-announcements', colorHex: '#ea4335' },
      ],
      portfolio: [],
    }
  }
  const ext = extname(sourcePath).toLowerCase()
  try {
    if (ext === '.csv') {
      // Parse CSV export (Webflow format)
      logger.info('Parsing CSV file (Webflow format)...')
      return await parseWebflowCSV(sourcePath)
    } else if (ext === '.json') {
      // Parse JSON export
      const content = readFileSync(sourcePath, 'utf-8')
      return JSON.parse(content) as WebflowExportData
    } else if (ext === '.html' || ext === '.htm') {
      // Parse HTML file (lossy — not all fields are present in HTML)
      logger.info('Parsing HTML file (this may not capture all data)...')
      return await parseHTMLFile(sourcePath)
    } else {
      // Auto-detect: try CSV first, then JSON, then HTML
      logger.info('Auto-detecting file format...')
      try {
        return await parseWebflowCSV(sourcePath)
      } catch {
        try {
          const content = readFileSync(sourcePath, 'utf-8')
          return JSON.parse(content) as WebflowExportData
        } catch {
          const content = readFileSync(sourcePath, 'utf-8')
          return parseWebflowHTML(content)
        }
      }
    }
  } catch (error) {
    // Any unrecoverable parse/read error aborts loading; caller exits.
    logger.error(`Error loading source data: ${error}`)
    return null
  }
}
// ============================================================
// COLLECTION MIGRATION
// ============================================================
/**
 * Migrate Webflow categories into the Payload `categories` collection.
 *
 * Existing slugs are skipped unless --force is set. Dry runs are counted as
 * "created" in the report and record placeholder IDs ("dry-run-id-N") so a
 * dry-run post migration can still resolve category links.
 *
 * @returns map of category slug -> Payload document ID for post linking.
 */
async function migrateCategories(
  payload: any,
  sourceData: WebflowExportData,
  config: MigrationConfig,
  logger: Logger,
  report: any,
): Promise<Map<string, string>> {
  logger.header('\n🏷 Migrating Categories')
  const categories = sourceData.categories || []
  if (categories.length === 0) {
    logger.warn('No categories found in source data')
    return new Map()
  }
  logger.info(`Found ${categories.length} categories`)
  const categoryMap = new Map<string, string>()
  const results: any[] = []
  let created = 0,
    skipped = 0,
    failed = 0
  // Get existing slugs for deduplication (--force disables the check)
  const existingSlugs = config.force ? new Set<string>() : await getAllSlugs(payload, 'categories')
  for (const category of categories) {
    const transformed = transformCategories([category])[0]
    if (!config.force && existingSlugs.has(transformed.slug)) {
      logger.debug(`⏭️ Skipping existing category: ${transformed.title}`)
      skipped++
      results.push({ slug: transformed.slug, success: true, skipped: true })
      continue
    }
    if (config.dryRun) {
      // Record a placeholder ID so dry-run post migration can still link.
      logger.debug(`✓ Would create category: ${transformed.title}`)
      created++
      results.push({ slug: transformed.slug, success: true })
      categoryMap.set(transformed.slug, `dry-run-id-${created}`)
      continue
    }
    try {
      const result = await payload.create({
        collection: 'categories',
        data: transformed,
      })
      logger.success(`Created category: ${transformed.title}`)
      created++
      results.push({ slug: transformed.slug, success: true, id: result.id })
      categoryMap.set(transformed.slug, result.id)
    } catch (error) {
      // A single failure does not abort the run; it is tallied in the report.
      logger.error(`Failed to create category "${transformed.title}": ${error}`)
      failed++
      results.push({ slug: transformed.slug, success: false, error: String(error) })
    }
  }
  updateReport(report, {
    collection: 'categories',
    created,
    skipped,
    failed,
    results,
  })
  logger.info(`Categories: ${created} created, ${skipped} skipped, ${failed} failed`)
  return categoryMap
}
/**
 * Migrate Webflow posts into the Payload `posts` collection.
 *
 * Featured-image files are downloaded/uploaded first, then posts are
 * created. Duplicates are detected by slug + publishedAt unless --force is
 * set; failures are tallied in the report rather than aborting the run.
 *
 * @param categoryMap slug -> category ID map from migrateCategories();
 *   empty when categories were not migrated in this run, in which case
 *   posts are created without category links.
 */
async function migratePosts(
  payload: any,
  sourceData: WebflowExportData,
  config: MigrationConfig,
  logger: Logger,
  report: any,
  categoryMap: Map<string, string>,
): Promise<void> {
  logger.header('\n📝 Migrating Posts')
  const posts = sourceData.posts || []
  if (posts.length === 0) {
    logger.warn('No posts found in source data')
    return
  }
  logger.info(`Found ${posts.length} posts`)
  const results: any[] = []
  let created = 0,
    skipped = 0,
    failed = 0
  // Get existing identifiers for deduplication (--force disables the check)
  const existingIds = config.force ? new Map<string, Date>() : await getExistingPostIdentifiers(payload)
  // Collect featured-image URLs for batch upload
  const mediaUrls = new Set<string>()
  for (const post of posts) {
    if (post.featuredImage) mediaUrls.add(post.featuredImage)
  }
  // Upload media before creating posts (skipped on dry runs).
  if (mediaUrls.size > 0 && !config.dryRun) {
    logger.info(`Processing ${mediaUrls.size} media files...`)
    // NOTE(review): processMediaUrls returns a URL -> media-ID map, but the
    // IDs are never attached to the transformed posts, so posts are created
    // without featured-image relationships. The previously unused binding
    // was removed (matching migratePortfolio); wiring the IDs into
    // `transformed` below remains TODO.
    await processMediaUrls(payload, Array.from(mediaUrls), {
      batchSize: config.batchSize,
      onProgress: (current, total) => logger.progress(current, total, 'media'),
    })
    logger.success(`Media processing complete`)
  }
  for (const post of posts) {
    const transformed = transformPosts([post])[0]
    // Resolve category IDs via the slug map built by migrateCategories
    if (post.postCategory && categoryMap.has(post.postCategory)) {
      transformed.categories = [categoryMap.get(post.postCategory)!]
    }
    // Duplicate check: key assumes publishedAt is a Date (see transformers)
    const postKey = `${transformed.slug}-${transformed.publishedAt.toISOString()}`
    if (!config.force && existingIds.has(postKey)) {
      logger.debug(`⏭️ Skipping existing post: ${transformed.title}`)
      skipped++
      results.push({ slug: transformed.slug, success: true, skipped: true })
      continue
    }
    if (config.dryRun) {
      logger.debug(`✓ Would create post: ${transformed.title}`)
      created++
      results.push({ slug: transformed.slug, success: true })
      continue
    }
    try {
      const result = await payload.create({
        collection: 'posts',
        data: transformed,
      })
      logger.success(`Created post: ${transformed.title}`)
      created++
      results.push({ slug: transformed.slug, success: true, id: result.id })
    } catch (error) {
      logger.error(`Failed to create post "${transformed.title}": ${error}`)
      failed++
      results.push({ slug: transformed.slug, success: false, error: String(error) })
    }
  }
  updateReport(report, {
    collection: 'posts',
    created,
    skipped,
    failed,
    results,
  })
  logger.info(`Posts: ${created} created, ${skipped} skipped, ${failed} failed`)
}
/**
 * Migrate Webflow portfolio items into the Payload `portfolio` collection.
 *
 * Preview images are downloaded/uploaded first (the returned URL -> ID map
 * is intentionally not bound; items are deduplicated and created by slug).
 * Existing slugs are skipped unless --force is set; failures are tallied in
 * the report rather than aborting the run.
 */
async function migratePortfolio(
  payload: any,
  sourceData: WebflowExportData,
  config: MigrationConfig,
  logger: Logger,
  report: any,
): Promise<void> {
  logger.header('\n💼 Migrating Portfolio')
  const portfolio = sourceData.portfolio || []
  if (portfolio.length === 0) {
    logger.warn('No portfolio items found in source data')
    return
  }
  logger.info(`Found ${portfolio.length} portfolio items`)
  const results: any[] = []
  let created = 0,
    skipped = 0,
    failed = 0
  // Get existing slugs for deduplication (--force disables the check)
  const existingSlugs = config.force ? new Set<string>() : await getAllSlugs(payload, 'portfolio')
  // Collect preview-image URLs for batch upload
  const mediaUrls = new Set<string>()
  for (const item of portfolio) {
    if (item.previewImage) mediaUrls.add(item.previewImage)
  }
  // Upload media before creating items (skipped on dry runs).
  // NOTE(review): uploaded media IDs are not attached to the transformed
  // items — confirm whether preview-image linkage is handled elsewhere.
  if (mediaUrls.size > 0 && !config.dryRun) {
    logger.info(`Processing ${mediaUrls.size} media files...`)
    await processMediaUrls(payload, Array.from(mediaUrls), {
      batchSize: config.batchSize,
      onProgress: (current, total) => logger.progress(current, total, 'media'),
    })
    logger.success(`Media processing complete`)
  }
  for (const item of portfolio) {
    const transformed = transformPortfolios([item])[0]
    if (!config.force && existingSlugs.has(transformed.slug)) {
      logger.debug(`⏭️ Skipping existing portfolio: ${transformed.title}`)
      skipped++
      results.push({ slug: transformed.slug, success: true, skipped: true })
      continue
    }
    if (config.dryRun) {
      logger.debug(`✓ Would create portfolio: ${transformed.title}`)
      created++
      results.push({ slug: transformed.slug, success: true })
      continue
    }
    try {
      const result = await payload.create({
        collection: 'portfolio',
        data: transformed,
      })
      logger.success(`Created portfolio: ${transformed.title}`)
      created++
      results.push({ slug: transformed.slug, success: true, id: result.id })
    } catch (error) {
      logger.error(`Failed to create portfolio "${transformed.title}": ${error}`)
      failed++
      results.push({ slug: transformed.slug, success: false, error: String(error) })
    }
  }
  updateReport(report, {
    collection: 'portfolio',
    created,
    skipped,
    failed,
    results,
  })
  logger.info(`Portfolio: ${created} created, ${skipped} skipped, ${failed} failed`)
}
// ============================================================
// HELPER FUNCTIONS
// ============================================================
/**
 * Expand the CLI collection selection: 'all' maps to every known
 * collection; otherwise the list is passed through unchanged.
 */
function determineCollections(collections: string[]): Array<'categories' | 'posts' | 'portfolio'> {
  type CollectionName = 'categories' | 'posts' | 'portfolio'
  if (collections.includes('all')) {
    return ['categories', 'posts', 'portfolio'] as CollectionName[]
  }
  return collections as CollectionName[]
}
// ============================================================
// ENTRY POINT
// ============================================================
// Kick off the migration; any unhandled error is fatal (non-zero exit code).
main().catch((error) => {
  console.error(`${colors.red}Fatal error:${colors.reset}`, error)
  process.exit(1)
})

View File

@@ -0,0 +1,188 @@
/**
* Migration Reporter
* Story 1.3: Content Migration Script
*
* Generates migration reports in JSON and Markdown formats
*/
import type { MigrationReport, CollectionMigrationResult } from './types'
import { writeFile, mkdir } from 'fs/promises'
import { join } from 'path'
import { existsSync } from 'fs'
// ============================================================
// REPORT GENERATION
// ============================================================
/**
 * Build a fresh, all-zero migration report stamped with the current time.
 *
 * @param dryRun whether this run is a dry run (recorded in the report).
 */
export function createReport(dryRun: boolean = false): MigrationReport {
  const report: MigrationReport = {
    timestamp: new Date().toISOString(),
    dryRun,
    summary: { total: 0, created: 0, skipped: 0, failed: 0 },
    byCollection: {},
    details: {},
  }
  return report
}
/**
 * Fold one collection's results into the report (mutates and returns it):
 * per-collection counts, full result details, and the grand totals.
 */
export function updateReport(
  report: MigrationReport,
  collectionResult: CollectionMigrationResult,
): MigrationReport {
  const { collection, created, skipped, failed } = collectionResult
  // Per-collection rollup
  report.byCollection[collection] = { created, skipped, failed }
  report.details[collection] = collectionResult
  // Grand totals
  const summary = report.summary
  summary.created += created
  summary.skipped += skipped
  summary.failed += failed
  summary.total += created + skipped + failed
  return report
}
/**
* Generate markdown report
*/
/**
 * Render a MigrationReport as a human-readable Markdown document.
 * Sections: header, overall summary table, per-collection count tables,
 * and per-item created/failed details.
 */
export function generateMarkdownReport(report: MigrationReport): string {
  const out: string[] = []
  const add = (...rows: string[]): void => {
    out.push(...rows)
  }
  const titleCase = (name: string): string => name.charAt(0).toUpperCase() + name.slice(1)

  // Header
  add(
    `# Migration Report`,
    ``,
    `**Generated:** ${new Date(report.timestamp).toLocaleString('zh-TW')}`,
    `**Mode:** ${report.dryRun ? '🧪 Dry Run (no changes made)' : '✅ Live Migration'}`,
    ``,
    `---`,
    ``,
  )

  // Overall summary table
  add(
    `## Summary`,
    ``,
    `| Metric | Count |`,
    `|--------|-------|`,
    `| Total Items | ${report.summary.total} |`,
    `| ✅ Created | ${report.summary.created} |`,
    `| ⏭️ Skipped | ${report.summary.skipped} |`,
    `| ❌ Failed | ${report.summary.failed} |`,
    ``,
  )

  // Per-collection counters
  add(`## By Collection`, ``)
  for (const [name, stats] of Object.entries(report.byCollection)) {
    add(
      `### ${titleCase(name)}`,
      ``,
      `| Metric | Count |`,
      `|--------|-------|`,
      `| Created | ${stats.created} |`,
      `| Skipped | ${stats.skipped} |`,
      `| Failed | ${stats.failed} |`,
      ``,
    )
  }

  // Per-item details: created and failed lists per collection
  if (report.details) {
    add(`## Details`, ``)
    for (const [name, result] of Object.entries(report.details)) {
      add(`### ${titleCase(name)}`, ``)
      const createdItems = result.results.filter((r) => r.success)
      const failedItems = result.results.filter((r) => !r.success)
      if (createdItems.length > 0) {
        add(`#### ✅ Created (${createdItems.length})`, ``)
        for (const entry of createdItems) {
          add(`- \`${entry.slug}\` (ID: ${entry.id})`)
        }
        add(``)
      }
      if (failedItems.length > 0) {
        add(`#### ❌ Failed (${failedItems.length})`, ``)
        for (const entry of failedItems) {
          add(`- \`${entry.slug}\`: ${entry.error}`)
        }
        add(``)
      }
    }
  }

  return out.join('\n')
}
/**
* Save report to files
*/
/**
 * Persist a migration report to disk as both JSON and Markdown.
 *
 * Files are named `migration-<full ISO timestamp>` with ':' and '.'
 * replaced by '-' so the name is filesystem-safe. Fix: previously the
 * code ran the same replace but then kept only `.split('T')[0]` (the date
 * part), making the sanitising dead code and causing multiple runs on the
 * same day to silently overwrite each other's reports.
 *
 * @param report - the report to save
 * @param outputDir - target directory, created recursively if missing
 */
export async function saveReport(
  report: MigrationReport,
  outputDir: string = './reports',
): Promise<void> {
  // Ensure directory exists
  if (!existsSync(outputDir)) {
    await mkdir(outputDir, { recursive: true })
  }
  // Full timestamp (date AND time) keeps same-day reports distinct.
  const timestamp = new Date().toISOString().replace(/[:.]/g, '-')
  const baseName = `migration-${timestamp}`
  // Save JSON report
  const jsonPath = join(outputDir, `${baseName}.json`)
  await writeFile(jsonPath, JSON.stringify(report, null, 2), 'utf-8')
  // Save Markdown report
  const mdPath = join(outputDir, `${baseName}.md`)
  await writeFile(mdPath, generateMarkdownReport(report), 'utf-8')
  console.log(`\n📄 Reports saved:`)
  console.log(`  - JSON: ${jsonPath}`)
  console.log(`  - Markdown: ${mdPath}`)
}
/**
* Print report summary to console
*/
/**
 * Print a compact summary of the report to stdout (no files written).
 */
export function printReportSummary(report: MigrationReport): void {
  const rule = '='.repeat(60)
  console.log(`\n${rule}`)
  console.log(`📊 MIGRATION REPORT`)
  console.log(`${rule}`)
  console.log(``)
  console.log(`Mode: ${report.dryRun ? '🧪 Dry Run' : '✅ Live'}`)
  console.log(``)
  console.log(`Summary:`)
  console.log(`  Total: ${report.summary.total}`)
  console.log(`  Created: ${report.summary.created}`)
  console.log(`  Skipped: ${report.summary.skipped} ⏭️`)
  console.log(`  Failed: ${report.summary.failed}`)
  console.log(``)
  if (report.byCollection) {
    console.log(`By Collection:`)
    Object.entries(report.byCollection).forEach(([name, stats]) => {
      console.log(
        `  ${name}: ${stats.created} created, ${stats.skipped} skipped, ${stats.failed} failed`,
      )
    })
  }
  console.log(`${rule}`)
}

View File

@@ -0,0 +1,93 @@
#!/usr/bin/env tsx
/**
* Test MongoDB and Payload CMS Connection
*/
import { config as dotenvConfig } from 'dotenv'
import { resolve, dirname } from 'path'
import { fileURLToPath } from 'url'
// Resolve .env path from script location.
// NOTE(review): this resolves three levels up ('../../../.env'), while the
// sibling scripts in this directory resolve '../../.env' (apps/backend/.env).
// Confirm which location is intended.
const __filename = fileURLToPath(import.meta.url)
const __dirname = dirname(__filename)
const envPath = resolve(__dirname, '../../../.env')
console.log(`Loading .env from: ${envPath}`)
dotenvConfig({ path: envPath })
import { getPayload } from 'payload'
import config from '../../src/payload.config'
/**
 * Verify DATABASE_URI is present, logging a credential-masked copy of it.
 * @returns true when the variable is set (non-empty), false otherwise
 */
async function testMongoDirect() {
  console.log('\n🔍 Testing Environment Variables...\n')
  const connectionString = process.env.DATABASE_URI
  // Mask the password portion (`:secret@` -> `:****@`) before logging.
  const masked = connectionString?.replace(/:[^:@]+@/, ':****@')
  console.log(`DATABASE_URI: ${masked}`)
  if (!connectionString) {
    console.log('❌ DATABASE_URI not set')
    return false
  }
  console.log('✅ DATABASE_URI is set')
  return true
}
/**
 * Initialize Payload with the project config and run a small read query
 * against the `posts` collection, dumping the first post's content shape
 * (useful for diagnosing whether `content` is stored as a Lexical JSON
 * string or an object).
 *
 * @returns true on success, false when init or the query throws
 */
async function testPayloadAPI() {
  console.log('\n🔍 Testing Payload CMS API Connection...\n')
  try {
    const payload = await getPayload({ config })
    console.log('✅ Payload CMS initialized')
    // Find posts
    const posts = await payload.find({
      collection: 'posts',
      limit: 3,
      depth: 0,
    })
    console.log(`📝 Found ${posts.totalDocs} posts`)
    if (posts.docs.length > 0) {
      const post = posts.docs[0]
      console.log(`\n📋 First post:`)
      console.log(`  Title: ${post.title}`)
      console.log(`  Slug: ${post.slug}`)
      console.log(`  Content Type: ${typeof post.content}`)
      // If content is a string, it may be serialized Lexical JSON —
      // show the root node's shape; otherwise show a raw preview.
      if (typeof post.content === 'string') {
        try {
          const parsed = JSON.parse(post.content)
          console.log(`  Lexical Type: ${parsed?.type}`)
          console.log(`  Lexical Version: ${parsed?.version}`)
          console.log(`  Children Count: ${parsed?.children?.length}`)
        } catch {
          console.log(`  Content (first 200 chars): ${post.content.substring(0, 200)}...`)
        }
      }
    }
    return true
  } catch (error) {
    console.log('❌ Payload CMS connection failed:', error)
    return false
  }
}
/**
 * Run both connectivity checks and print a pass/fail summary.
 * Note: the process exit code is not set on failure — callers must read
 * the printed summary.
 */
async function main() {
  console.log('═══════════════════════════════════════════════════════════')
  console.log('Connection Test')
  console.log('═══════════════════════════════════════════════════════════')
  const mongoOk = await testMongoDirect()
  const payloadOk = await testPayloadAPI()
  console.log('\n═══════════════════════════════════════════════════════════')
  console.log('Summary:')
  console.log(`  DATABASE_URI: ${mongoOk ? '✅ OK' : '❌ FAILED'}`)
  console.log(`  Payload CMS: ${payloadOk ? '✅ OK' : '❌ FAILED'}`)
  console.log('═══════════════════════════════════════════════════════════\n')
}
main().catch(console.error)

View File

@@ -0,0 +1,49 @@
#!/usr/bin/env tsx
import { config as dotenvConfig } from 'dotenv'
import { resolve, dirname } from 'path'
import { fileURLToPath } from 'url'
const __filename = fileURLToPath(import.meta.url)
const __dirname = dirname(__filename)
// Script is at: apps/backend/scripts/migration/test-create-post.ts
// .env is at: apps/backend/.env
const envPath = resolve(__dirname, '../../.env')
dotenvConfig({ path: envPath })
// Verify env is loaded.
// NOTE(review): the second line prints the first 10 characters of
// PAYLOAD_SECRET — a partial secret leak; acceptable only for local
// debugging, consider removing before sharing logs.
console.log('PAYLOAD_SECRET loaded:', !!process.env.PAYLOAD_SECRET)
console.log('PAYLOAD_SECRET value:', process.env.PAYLOAD_SECRET?.substring(0, 10))
// Dynamic import after dotenv to ensure env is loaded before config.
// NOTE: `config` below is the module *namespace* object; the actual
// Payload config is its `default` export.
const { getPayload } = await import('payload')
const config = await import('../../src/payload.config.ts')
const { parseWebflowCSV } = await import('./csvParser.ts')
const { transformPosts } = await import('./transformers.ts')
async function main() {
const payload = await getPayload({ config })
const data = await parseWebflowCSV('/Users/pukpuk/Dev/website-enchun-mgr/恩群數位行銷 - 行銷放大鏡集.csv')
const failPost = data.posts.find((p: any) => p.title.includes('一點都不難'))
const transformed = transformPosts([failPost])[0]
console.log('Attempting to create:', transformed.title)
console.log('Slug:', transformed.slug)
try {
const result = await payload.create({
collection: 'posts',
data: transformed,
})
console.log('Success! Created:', result.id)
} catch (error: any) {
console.log('Failed!')
console.log('Error message:', error.message)
if (error.data) {
console.log('Error data:', JSON.stringify(error.data, null, 2))
}
}
}
main().catch(console.error)

View File

@@ -0,0 +1,46 @@
#!/usr/bin/env tsx
/**
* Test CSV Parser (No MongoDB required)
*/
import { parseWebflowCSV } from './csvParser'
import { Logger } from './utils'
// Verbose logger so per-post debug lines are printed.
const logger = new Logger(true)
/**
 * Parse the Webflow CSV (path from argv[2] or a default relative path)
 * and print a summary of what was found. No database access — this only
 * exercises the CSV parser.
 */
async function main() {
  const csvPath = process.argv[2] || '../../恩群數位行銷 - 行銷放大鏡集 - 61f24aa108528b279f942ca9.csv'
  logger.header('🧪 Testing CSV Parser')
  logger.info(`File: ${csvPath}`)
  try {
    const data = await parseWebflowCSV(csvPath)
    logger.success(`✓ CSV parsed successfully!`)
    logger.info(`Posts: ${data.posts?.length || 0}`)
    logger.info(`Categories: ${data.categories?.length || 0}`)
    logger.info(`Portfolio: ${data.portfolio?.length || 0}`)
    if (data.categories && data.categories.length > 0) {
      logger.info('\n📋 Categories found:')
      for (const cat of data.categories) {
        logger.info(`  - ${cat.name} (${cat.slug}) - ${cat.colorHex}`)
      }
    }
    // Preview only the first few posts to keep output short.
    if (data.posts && data.posts.length > 0) {
      logger.info('\n📝 First 5 posts:')
      for (const post of data.posts.slice(0, 5)) {
        logger.info(`  - [${post.postCategory || 'uncategorized'}] ${post.title}`)
        logger.debug(`    Slug: ${post.slug}`)
        logger.debug(`    Date: ${post.publishedDate}`)
      }
    }
  } catch (error) {
    logger.error(`Error: ${error}`)
  }
}
main()

View File

@@ -0,0 +1,15 @@
#!/usr/bin/env tsx
import { config as dotenvConfig } from 'dotenv'
import { resolve, dirname } from 'path'
import { fileURLToPath } from 'url'
// Sanity-check script: verify that the backend .env file loads and that
// the two required variables are present. Prints SET/NOT SET only —
// never the values themselves.
const __filename = fileURLToPath(import.meta.url)
const __dirname = dirname(__filename)
const envPath = resolve(__dirname, '../../.env')
console.log('Loading .env from:', envPath)
const result = dotenvConfig({ path: envPath })
console.log('dotenv result:', result.error ? result.error.message : 'success')
console.log('PAYLOAD_SECRET:', process.env.PAYLOAD_SECRET ? 'SET' : 'NOT SET')
console.log('DATABASE_URI:', process.env.DATABASE_URI ? 'SET' : 'NOT SET')

View File

@@ -0,0 +1,111 @@
#!/usr/bin/env tsx
/**
* Test Payload Post Creation with Lexical Content
*/
import { getPayload } from 'payload'
import config from '@payload-config'
/** Minimal Lexical root node with a single paragraph of plain text. */
function sampleLexical(text: string) {
  return {
    type: 'root',
    version: 1,
    children: [{
      type: 'paragraph',
      version: 1,
      children: [{
        type: 'text',
        version: 1,
        text,
      }],
    }],
    direction: null,
  }
}

/**
 * Create a draft post with the given content value, log the outcome, and
 * delete the post again on success so probes don't accumulate.
 *
 * @param payload - initialized Payload instance
 * @param testNo - 1-based test number (for the header line)
 * @param label - human label reused in header/result lines
 * @param title - post title to create
 * @param slugPrefix - slug prefix; Date.now() is appended for uniqueness
 * @param content - the content value under test
 * @param showValidation - dump `error.data` validation details on failure
 */
async function runContentTest(
  payload: any,
  testNo: number,
  label: string,
  title: string,
  slugPrefix: string,
  content: unknown,
  showValidation: boolean,
): Promise<void> {
  console.log(`\n🧪 Test ${testNo}: ${label}`)
  try {
    const result = await payload.create({
      collection: 'posts',
      data: {
        title,
        slug: slugPrefix + Date.now(),
        content,
        publishedAt: new Date(),
        status: 'draft',
      },
    })
    console.log(`✓ ${label} worked:`, result.id)
    await payload.delete({
      collection: 'posts',
      id: result.id,
    })
  } catch (error: any) {
    console.log(`✗ ${label} failed:`, error.message)
    if (showValidation && error.data) {
      console.error('  Validation errors:', JSON.stringify(error.data, null, 2))
    }
  }
}

/**
 * Probe which content formats the posts richText field accepts:
 * raw HTML string, serialized Lexical JSON string, and a Lexical object.
 * (Refactored from three copy-pasted try/catch blocks into one helper.)
 */
async function testPostCreation() {
  const payload = await getPayload({ config })
  await runContentTest(
    payload, 1, 'String content', 'Test String Content', 'test-string-',
    '<p>Simple HTML content</p>', false,
  )
  await runContentTest(
    payload, 2, 'JSON string content', 'Test JSON Content', 'test-json-',
    JSON.stringify(sampleLexical('This is a test paragraph.')), true,
  )
  await runContentTest(
    payload, 3, 'Object content', 'Test Object Content', 'test-object-',
    sampleLexical('This is a test paragraph.'), true,
  )
}
testPostCreation().catch(console.error)

View File

@@ -0,0 +1,107 @@
#!/usr/bin/env tsx
/**
* Test Payload Post Creation - Two Step Approach
*/
import { getPayload } from 'payload'
import config from '@payload-config'
/**
 * Probe a two-step workflow against the posts collection:
 *   1. create a draft post with NO content field,
 *   2. update it with a Lexical content *object*,
 *   3. update it with a serialized Lexical JSON *string*,
 * then delete the probe post. Each step logs success/failure so the
 * accepted content representation can be determined empirically.
 */
async function testTwoStepPost() {
  const payload = await getPayload({ config })
  // Unique slug per run so repeated executions never collide.
  const testSlug = 'test-two-step-' + Date.now()
  // NOTE(review): typed as string; confirm Payload IDs are strings for
  // this database adapter (Mongo ObjectIDs are, numeric IDs are not).
  let postId = ''
  // Step 1: Create post without content
  console.log('\n🧪 Step 1: Create post without content')
  try {
    const result = await payload.create({
      collection: 'posts',
      data: {
        title: 'Test Two Step Post',
        slug: testSlug,
        publishedAt: new Date(),
        status: 'draft',
        // No content field
      },
    })
    console.log('✓ Post created without content:', result.id)
    postId = result.id
  } catch (error: any) {
    console.log('✗ Failed:', error.message)
    if (error.data) {
      console.error('  Errors:', JSON.stringify(error.data, null, 2))
    }
    // Without a post there is nothing to update — abort the probe.
    return
  }
  // Step 2: Update with content
  console.log('\n🧪 Step 2: Update post with content (object)')
  try {
    const updated = await payload.update({
      collection: 'posts',
      id: postId,
      data: {
        content: {
          type: 'root',
          version: 1,
          children: [{
            type: 'paragraph',
            version: 1,
            children: [{
              type: 'text',
              version: 1,
              text: 'This is a test paragraph.'
            }]
          }],
          direction: null
        },
      },
    })
    console.log('✓ Post updated with object content:', updated.id)
  } catch (error: any) {
    console.log('✗ Object content failed:', error.message)
  }
  // Step 3: Try JSON string content
  console.log('\n🧪 Step 3: Update with JSON string content')
  try {
    const updated = await payload.update({
      collection: 'posts',
      id: postId,
      data: {
        content: JSON.stringify({
          type: 'root',
          version: 1,
          children: [{
            type: 'paragraph',
            version: 1,
            children: [{
              type: 'text',
              version: 1,
              text: 'This is a test paragraph from JSON string.'
            }]
          }],
          direction: null
        }),
      },
    })
    console.log('✓ Post updated with JSON string content:', updated.id)
  } catch (error: any) {
    console.log('✗ JSON string content failed:', error.message)
  }
  // Cleanup: remove the probe post regardless of step 2/3 outcomes.
  console.log('\n🧪 Cleanup: Delete test post')
  try {
    await payload.delete({
      collection: 'posts',
      id: postId,
    })
    console.log('✓ Test post deleted')
  } catch (error: any) {
    console.log('✗ Delete failed:', error.message)
  }
}
testTwoStepPost().catch(console.error)

View File

@@ -0,0 +1,207 @@
/**
* Data Transformers
* Story 1.3: Content Migration Script
*
* Transforms Webflow data to Payload CMS format
*/
import type {
PayloadCategory,
PayloadPostData,
PayloadPortfolioData,
WebflowCategory,
WebflowPost,
WebflowPortfolioItem,
} from './types'
import { toSlug, splitColorToTextBackground, truncate, htmlToPlainText, parseDate } from './utils'
import { htmlToLexical } from './lexicalConverter'
// ============================================================
// CATEGORY TRANSFORMER
// ============================================================
/**
* Transform Webflow category to Payload CMS format
*/
export function transformCategory(
webflowCategory: WebflowCategory,
order: number = 0,
): PayloadCategory {
const { textColor, backgroundColor } = splitColorToTextBackground(
webflowCategory.colorHex || '#ffffff',
)
return {
title: webflowCategory.name,
nameEn: '', // Can be manually set later
order,
textColor,
backgroundColor,
slug: webflowCategory.slug || toSlug(webflowCategory.name),
}
}
/**
* Transform multiple categories
*/
/** Transform a list of categories, using array position as display order. */
export function transformCategories(
  webflowCategories: WebflowCategory[],
): PayloadCategory[] {
  return webflowCategories.map((category, position) => transformCategory(category, position))
}
// ============================================================
// POST TRANSFORMER
// ============================================================
/**
* Transform Webflow post to Payload CMS format
*/
/**
 * Transform a Webflow post into Payload CMS shape.
 * The HTML body is converted to Lexical for the richText field; media
 * references and category links are left empty for later resolution.
 */
export function transformPost(webflowPost: WebflowPost): PayloadPostData {
  // Prefer the export's excerpt; otherwise derive one from the HTML body.
  const excerpt = webflowPost.excerpt || htmlToPlainText(webflowPost.content, 200)
  // Convert HTML to Lexical JSON string format (for richText field storage)
  const lexicalContent = htmlToLexical(webflowPost.content || '')
  const seo = {
    title: webflowPost.seoTitle || webflowPost.title,
    description: webflowPost.seoDescription || excerpt,
    image: undefined, // Will be set by media handler
  }
  return {
    title: webflowPost.title,
    slug: webflowPost.slug || toSlug(webflowPost.title),
    heroImage: undefined, // Will be set by media handler
    ogImage: undefined, // Will be set by media handler
    content: lexicalContent as any, // Lexical JSON string for richText field
    excerpt: truncate(excerpt, 200),
    publishedAt: parseDate(webflowPost.publishedDate),
    status: 'published',
    categories: [], // Will be resolved after categories are migrated
    meta: seo,
  }
}
/**
* Transform multiple posts
*/
/** Transform every Webflow post in the export. */
export function transformPosts(webflowPosts: WebflowPost[]): PayloadPostData[] {
  const converted: PayloadPostData[] = []
  for (const webflowPost of webflowPosts) {
    converted.push(transformPost(webflowPost))
  }
  return converted
}
// ============================================================
// PORTFOLIO TRANSFORMER
// ============================================================
/**
* Transform Webflow portfolio item to Payload CMS format
*/
export function transformPortfolio(
webflowPortfolio: WebflowPortfolioItem,
): PayloadPortfolioData {
return {
title: webflowPortfolio.name,
slug: webflowPortfolio.slug || toSlug(webflowPortfolio.name),
url: webflowPortfolio.websiteLink,
image: undefined, // Will be set by media handler
description: webflowPortfolio.description,
websiteType: webflowPortfolio.websiteType || 'other',
tags: parseTagsString(webflowPortfolio.tags),
}
}
/**
* Transform multiple portfolio items
*/
/** Transform every Webflow portfolio item in the export. */
export function transformPortfolios(
  webflowPortfolios: WebflowPortfolioItem[],
): PayloadPortfolioData[] {
  return webflowPortfolios.map((entry) => transformPortfolio(entry))
}
// ============================================================
// HELPER FUNCTIONS
// ============================================================
/**
* Parse comma-separated tags into array
*/
/**
 * Parse a comma-separated tag string into Payload's array-field shape,
 * trimming whitespace and dropping blank entries.
 * NOTE(review): duplicates `parseTagsString` in ./utils — consider
 * importing the shared helper instead of keeping this private copy.
 */
function parseTagsString(tagsString: string): Array<{ tag: string }> {
  if (typeof tagsString !== 'string' || !tagsString) {
    return []
  }
  const names = tagsString
    .split(',')
    .map((raw) => raw.trim())
    .filter((raw) => raw.length > 0)
  return names.map((name) => ({ tag: name }))
}
// ============================================================
// VALIDATION FUNCTIONS
// ============================================================
/**
* Validate transformed category
*/
/**
 * Validate a transformed category before sending it to Payload.
 * @returns a valid flag plus human-readable problem descriptions
 */
export function validateCategory(category: PayloadCategory): { valid: boolean; errors: string[] } {
  const problems: string[] = []
  if (!category.title) problems.push('Category title is required')
  if (!category.slug) problems.push('Category slug is required')
  if (!category.textColor || !category.backgroundColor) problems.push('Category colors are required')
  return { valid: problems.length === 0, errors: problems }
}
/**
* Validate transformed post
*/
/**
 * Validate a transformed post before sending it to Payload.
 * @returns a valid flag plus human-readable problem descriptions
 */
export function validatePost(post: PayloadPostData): { valid: boolean; errors: string[] } {
  const problems: string[] = []
  if (!post.title) problems.push('Post title is required')
  if (!post.slug) problems.push('Post slug is required')
  if (!post.content) problems.push('Post content is required')
  if (!post.publishedAt) problems.push('Post published date is required')
  return { valid: problems.length === 0, errors: problems }
}
/**
* Validate transformed portfolio item
*/
/**
 * Validate a transformed portfolio item before sending it to Payload.
 * @returns a valid flag plus human-readable problem descriptions
 */
export function validatePortfolio(
  portfolio: PayloadPortfolioData,
): { valid: boolean; errors: string[] } {
  const problems: string[] = []
  if (!portfolio.title) problems.push('Portfolio title is required')
  if (!portfolio.slug) problems.push('Portfolio slug is required')
  if (!portfolio.url) problems.push('Portfolio URL is required')
  if (!portfolio.websiteType) problems.push('Portfolio website type is required')
  return { valid: problems.length === 0, errors: problems }
}

View File

@@ -0,0 +1,159 @@
/**
* Migration Script Types
* Story 1.3: Content Migration Script
*/
import { File } from 'payload'
// Lexical content type (simplified) — shape of the editor root node that
// richText fields store (or a JSON-serialized string of it).
export interface LexicalRoot {
  type: 'root'
  version: 1
  children: Array<any>
  direction: string | null
}
// ============================================================
// WEBFLOW DATA TYPES (from HTML/JSON export)
// ============================================================
/** One blog post row as parsed from the Webflow export. */
export interface WebflowPost {
  title: string
  slug: string
  content: string // HTML content
  publishedDate: string | Date
  postCategory?: string // Category name or slug
  featuredImage?: string // Image URL
  seoTitle?: string
  seoDescription?: string
  excerpt?: string
}
/** One blog category row as parsed from the Webflow export. */
export interface WebflowCategory {
  name: string // Chinese name
  slug: string
  colorHex?: string // Single color to split into text + background
}
/** One portfolio row as parsed from the Webflow export. */
export interface WebflowPortfolioItem {
  name: string
  slug: string
  websiteLink: string
  previewImage: string // Image URL
  description: string
  websiteType: 'corporate' | 'ecommerce' | 'landing' | 'brand' | 'other'
  tags: string // Comma-separated string
}
/** Everything the parser extracts from a Webflow export in one bundle. */
export interface WebflowExportData {
  posts?: WebflowPost[]
  categories?: WebflowCategory[]
  portfolio?: WebflowPortfolioItem[]
}
// ============================================================
// PAYLOAD CMS DATA TYPES
// ============================================================
/** Category document shape expected by payload.create({ collection: 'categories' }). */
export interface PayloadCategory {
  title: string
  nameEn?: string
  order: number
  textColor: string
  backgroundColor: string
  slug: string
}
/** Post document shape expected by payload.create({ collection: 'posts' }). */
export interface PayloadPostData {
  title: string
  slug: string
  heroImage?: string // Media ID
  ogImage?: string // Media ID
  content: string // Lexical JSON string for richText field
  excerpt?: string
  publishedAt: Date
  status: 'draft' | 'review' | 'published'
  categories?: string[] // Category IDs
  meta?: {
    title?: string
    description?: string
    image?: string // Media ID
  }
}
/** Portfolio document shape expected by payload.create({ collection: 'portfolio' }). */
export interface PayloadPortfolioData {
  title: string
  slug: string
  url: string
  image?: string // Media ID
  description?: string
  websiteType: 'corporate' | 'ecommerce' | 'landing' | 'brand' | 'other'
  tags?: Array<{ tag: string }>
}
// ============================================================
// MIGRATION RESULT TYPES
// ============================================================
/** Outcome of migrating a single item. */
export interface MigrationResult {
  success: boolean
  id?: string
  slug?: string
  error?: string
  skipped?: boolean
}
/** Aggregated outcome of migrating one collection. */
export interface CollectionMigrationResult {
  collection: string
  created: number
  skipped: number
  failed: number
  results: Array<{
    slug: string
    success: boolean
    id?: string
    error?: string
  }>
}
/** Full run report: overall summary, per-collection counters, and details. */
export interface MigrationReport {
  timestamp: string
  dryRun: boolean
  summary: {
    total: number
    created: number
    skipped: number
    failed: number
  }
  byCollection: {
    categories?: { created: number; skipped: number; failed: number }
    posts?: { created: number; skipped: number; failed: number }
    portfolio?: { created: number; skipped: number; failed: number }
  }
  details: {
    categories?: CollectionMigrationResult
    posts?: CollectionMigrationResult
    portfolio?: CollectionMigrationResult
  }
}
// ============================================================
// MIGRATION CONFIG TYPES
// ============================================================
/** CLI/runtime options controlling a migration run. */
export interface MigrationConfig {
  dryRun: boolean
  verbose: boolean
  collections: Array<'categories' | 'posts' | 'portfolio' | 'all'>
  force: boolean // Overwrite existing items
  batchSize: number // For batch processing
  sourcePath: string // Path to HTML or JSON file
}
/** Result of downloading one remote media asset. */
export interface MediaDownloadResult {
  success: boolean
  url: string
  buffer?: Buffer
  filename?: string
  error?: string
}

View File

@@ -0,0 +1,78 @@
#!/usr/bin/env tsx
// Quick-hack variant of the hero-image matcher (see the cleaner script that
// follows). Links Webflow featured-image URLs to uploaded Media docs by a
// filename-hash heuristic and backfills posts.heroImage.
// NOTE(review): the dynamic `import('dotenv/config')` is not awaited, so env
// loading races the payload import chain — confirm env is reliably set.
// NOTE(review): media/posts queries use limit:100; collections larger than
// 100 docs would be silently truncated.
import('dotenv/config')
import('payload').then(async ({getPayload})=>{
  const cfg = await import('../../src/payload.config.ts')
  const p = await getPayload({config: cfg.default})
  console.log('Loading media...')
  const media = await p.find({collection:'media', limit:100, depth:0})
  // filename -> media ID lookup table.
  const filenameToId = new Map<string, string>()
  media.docs.forEach((m: any) => {
    filenameToId.set(m.filename, m.id)
  })
  console.log(`Found ${filenameToId.size} media files`)
  const { parseWebflowCSV } = await import('./csvParser.ts')
  const { transformPosts } = await import('./transformers.ts')
  const data = await parseWebflowCSV('/Users/pukpuk/Dev/website-enchun-mgr/恩群數位行銷 - 行銷放大鏡集.csv')
  const posts = await p.find({collection:'posts', limit:100, depth:0})
  const postsBySlug = new Map<string, any>()
  posts.docs.forEach((post: any) => {
    postsBySlug.set(post.slug, post)
  })
  console.log('\nMatching hero images...\n')
  let matched = 0
  let updated = 0
  let notFound = 0
  for (const webflowPost of data.posts) {
    const featuredImage = webflowPost.featuredImage
    if (!featuredImage) continue
    // Last URL path segment is the Webflow-side filename.
    const urlParts = featuredImage.split('/')
    const webflowFilename = urlParts[urlParts.length - 1]
    let mediaId: string | null = null
    for (const [filename, id] of filenameToId.entries()) {
      // Match by checking if both contain the same hash (first 15 chars).
      // NOTE(review): substring containment can false-positive on short or
      // similar names — spot-check the results.
      const webflowHash = webflowFilename.split('.')[0].substring(0, 15)
      const payloadHash = filename.split('.')[0].substring(0, 15)
      if (filename.includes(webflowHash) || webflowFilename.includes(payloadHash)) {
        mediaId = id
        matched++
        break
      }
    }
    if (!mediaId) {
      notFound++
      console.log(`No match: ${webflowPost.title?.substring(0, 40)}`)
      console.log(`  URL: ${webflowFilename.substring(0, 80)}`)
      continue
    }
    // Re-run the transform to recover the canonical slug for lookup.
    const transformed = transformPosts([webflowPost])[0]
    const post = postsBySlug.get(transformed.slug)
    // Only backfill posts that don't already have a hero image.
    if (post && !post.heroImage) {
      await p.update({
        collection:'posts',
        id: post.id,
        data:{ heroImage: mediaId }
      })
      updated++
      console.log(`Updated: ${webflowPost.title?.substring(0, 40)}`)
    }
  }
  console.log('\n=== SUMMARY ===')
  console.log(`Matched: ${matched}`)
  console.log(`Updated: ${updated}`)
  console.log(`Not found: ${notFound}`)
}).catch(e=>console.error('Error:', e))

View File

@@ -0,0 +1,105 @@
#!/usr/bin/env tsx
/**
* Update posts with heroImage by matching Webflow URLs to uploaded Media
*/
import { config as dotenvConfig } from 'dotenv'
import { resolve, dirname } from 'path'
import { fileURLToPath } from 'url'
// Load apps/backend/.env before the payload config is imported below.
const __filename = fileURLToPath(import.meta.url)
const __dirname = dirname(__filename)
const envPath = resolve(__dirname, '../../.env')
dotenvConfig({ path: envPath })
// Debug: confirm env loaded (prints presence only, not the secret).
console.log('Loading .env from:', envPath)
console.log('PAYLOAD_SECRET loaded:', !!process.env.PAYLOAD_SECRET)
import { getPayload } from 'payload'
import config from '../../src/payload.config'
import { parseWebflowCSV } from './csvParser'
import { transformPosts } from './transformers'
/**
 * Backfill posts.heroImage by matching Webflow featured-image URLs against
 * uploaded Media documents using a filename-hash heuristic.
 * NOTE(review): both finds use limit:100 — collections larger than 100
 * docs would be silently truncated.
 */
async function main() {
  const payload = await getPayload({ config })
  console.log('🔍 Loading media files...')
  const media = await payload.find({ collection: 'media', limit: 100, depth: 0 })
  // Create filename to ID mapping
  const filenameToId = new Map<string, string>()
  media.docs.forEach((m: any) => {
    filenameToId.set(m.filename, m.id)
  })
  console.log(`📁 Found ${filenameToId.size} media files`)
  console.log('📂 Loading CSV data...')
  const data = await parseWebflowCSV('/Users/pukpuk/Dev/website-enchun-mgr/恩群數位行銷 - 行銷放大鏡集.csv')
  console.log('📝 Loading posts...')
  const posts = await payload.find({ collection: 'posts', limit: 100, depth: 0 })
  const postsBySlug = new Map<string, any>()
  posts.docs.forEach((post: any) => {
    postsBySlug.set(post.slug, post)
  })
  console.log('\n🔗 Matching hero images...\n')
  let matched = 0
  let updated = 0
  let notFound = 0
  for (const webflowPost of data.posts) {
    const featuredImage = webflowPost.featuredImage
    if (!featuredImage) continue
    // Extract filename from Webflow URL
    const urlParts = featuredImage.split('/')
    const webflowFilename = urlParts[urlParts.length - 1]
    // Find matching media by comparing filename patterns
    let mediaId: string | null = null
    for (const [filename, id] of filenameToId.entries()) {
      // Check if Webflow filename is contained in Payload filename or vice versa
      // They may have different prefixes but the hash should match
      // NOTE(review): 15-char substring containment can false-positive on
      // short or similar filenames — spot-check the results.
      if (filename.includes(webflowFilename.split('.')[0].substring(0, 15)) ||
          webflowFilename.includes(filename.split('.')[0].substring(0, 15))) {
        mediaId = id
        matched++
        break
      }
    }
    if (!mediaId) {
      notFound++
      console.log(`❌ No match: ${webflowPost.title?.substring(0, 40)}`)
      console.log(`  URL: ${webflowFilename}`)
      continue
    }
    // Find and update the post (transform re-derives the canonical slug).
    const transformed = transformPosts([webflowPost])[0]
    const post = postsBySlug.get(transformed.slug)
    // Only backfill posts that don't already have a hero image.
    if (post && !post.heroImage) {
      await payload.update({
        collection: 'posts',
        id: post.id,
        data: { heroImage: mediaId },
      })
      updated++
      console.log(`✓ Updated: ${webflowPost.title?.substring(0, 40)}`)
    }
  }
  console.log('\n=== SUMMARY ===')
  console.log(`Matched: ${matched}`)
  console.log(`Updated: ${updated}`)
  console.log(`Not found: ${notFound}`)
}
main().catch(console.error)

View File

@@ -0,0 +1,377 @@
/**
* Migration Utilities
* Story 1.3: Content Migration Script
*/
import { MigrationConfig } from './types'
// ============================================================
// LOGGING UTILITIES
// ============================================================
// ANSI escape sequences used by Logger and the migration entry points.
export const colors = {
  reset: '\x1b[0m',
  bright: '\x1b[1m',
  dim: '\x1b[2m',
  red: '\x1b[31m',
  green: '\x1b[32m',
  yellow: '\x1b[33m',
  blue: '\x1b[34m',
  cyan: '\x1b[36m',
  white: '\x1b[37m',
}
/**
 * Minimal console logger with ANSI colouring and an inline progress bar.
 * `debug` output is only emitted when constructed with verbose = true.
 *
 * NOTE(review): the level methods emit a colour code immediately followed
 * by reset (an empty coloured span) — a status glyph may have been lost;
 * confirm the intended prefix.
 */
export class Logger {
  private readonly isVerbose: boolean

  constructor(verbose: boolean = false) {
    this.isVerbose = verbose
  }

  info(message: string): void {
    console.log(`${colors.blue}${colors.reset} ${message}`)
  }

  success(message: string): void {
    console.log(`${colors.green}${colors.reset} ${message}`)
  }

  error(message: string): void {
    console.log(`${colors.red}${colors.reset} ${message}`)
  }

  warn(message: string): void {
    console.log(`${colors.yellow}${colors.reset} ${message}`)
  }

  /** Emitted only in verbose mode. */
  debug(message: string): void {
    if (!this.isVerbose) {
      return
    }
    console.log(`${colors.dim}${colors.reset} ${message}`)
  }

  header(message: string): void {
    console.log(`\n${colors.bright}${colors.cyan}${message}${colors.reset}`)
  }

  /**
   * Draw/refresh a 50-cell progress bar on the current line; prints a
   * newline once current reaches total.
   */
  progress(current: number, total: number, message: string = ''): void {
    const percent = Math.round((current / total) * 100)
    const filled = Math.floor(percent / 2)
    const bar = '█'.repeat(filled) + '░'.repeat(50 - filled)
    process.stdout.write(
      `\r${colors.cyan}[${bar}]${colors.reset} ${percent}% ${message}`.padEnd(100),
    )
    if (current === total) {
      process.stdout.write('\n')
    }
  }
}
// ============================================================
// STRING UTILITIES
// ============================================================
/**
* Convert any string to a URL-friendly slug
*/
/**
 * Convert an arbitrary string into a URL-friendly slug.
 * Keeps ASCII alphanumerics, CJK characters (U+4E00–U+9FA5), '/' and '-';
 * everything else becomes a hyphen. Accents are stripped via NFD
 * decomposition, hyphen runs collapse, and edge hyphens are trimmed.
 */
export function toSlug(value: string): string {
  const lowered = value.toString().toLowerCase().trim()
  const deaccented = lowered.normalize('NFD').replace(/[\u0300-\u036f]/g, '')
  return deaccented
    .replace(/[^a-z0-9\u4e00-\u9fa5/-]/g, '-') // disallowed chars -> hyphen (keep Chinese)
    .replace(/-+/g, '-') // collapse hyphen runs
    .replace(/^-+|-+$/g, '') // trim leading/trailing hyphens
}
/**
* Extract filename from URL
*/
/**
 * Extract the trailing path segment of a URL.
 * Falls back to a timestamped placeholder name when the URL is
 * unparsable or its path has no final segment.
 */
export function getFilenameFromUrl(url: string): string {
  try {
    const segments = new URL(url).pathname.split('/')
    const last = segments.pop()
    return last || `file-${Date.now()}`
  } catch {
    return `file-${Date.now()}`
  }
}
/**
* Get file extension from filename or URL
*/
/**
 * Lower-cased extension of a filename or URL; defaults to 'jpg' when none
 * is present.
 */
export function getFileExtension(filename: string): string {
  const dotMatch = /\.([^.]+)$/.exec(filename)
  return dotMatch ? dotMatch[1].toLowerCase() : 'jpg'
}
/**
* Parse comma-separated tags into array
*/
/**
 * Parse a comma-separated tag string into Payload's array-field shape,
 * trimming whitespace and dropping blank entries.
 */
export function parseTagsString(tagsString: string): Array<{ tag: string }> {
  if (typeof tagsString !== 'string' || !tagsString) {
    return []
  }
  const names = tagsString
    .split(',')
    .map((raw) => raw.trim())
    .filter((raw) => raw.length > 0)
  return names.map((name) => ({ tag: name }))
}
/**
* Convert hex color to text/background pair
* Uses luminance to determine if text should be black or white
*/
/**
 * Derive a text/background colour pair from a single hex colour.
 *
 * The input becomes the background; the text colour is black or white,
 * whichever contrasts better per the Rec. 601 luma weights
 * (0.299 R + 0.587 G + 0.114 B).
 *
 * Fixes: the input is now validated as real hex — previously a 3- or
 * 6-char non-hex string (e.g. 'xyz') produced an invalid background like
 * '#xxyyzz' while NaN luminance silently selected white text. Also
 * replaces the deprecated String.prototype.substr with slice.
 *
 * @param hexColor - colour like '#aabbcc', 'aabbcc' or '#abc'
 * @returns black-on-white defaults when the input is missing or invalid
 */
export function splitColorToTextBackground(
  hexColor: string,
): { textColor: string; backgroundColor: string } {
  // Default to black text on white background
  const defaultResult = {
    textColor: '#000000',
    backgroundColor: '#ffffff',
  }
  if (!hexColor) {
    return defaultResult
  }
  // Normalise: strip '#', expand shorthand '#abc' -> 'aabbcc'
  let hex = hexColor.replace('#', '')
  if (hex.length === 3) {
    hex = hex.split('').map((c) => c + c).join('')
  }
  // Reject anything that is not exactly six hex digits.
  if (!/^[0-9a-fA-F]{6}$/.test(hex)) {
    return defaultResult
  }
  // Calculate luminance
  const r = parseInt(hex.slice(0, 2), 16) / 255
  const g = parseInt(hex.slice(2, 4), 16) / 255
  const b = parseInt(hex.slice(4, 6), 16) / 255
  const luminance = 0.299 * r + 0.587 * g + 0.114 * b
  // Use original color as background, choose contrasting text
  return {
    textColor: luminance > 0.5 ? '#000000' : '#ffffff',
    backgroundColor: `#${hex}`,
  }
}
/**
 * Truncate text to at most maxLength characters, appending '...' when cut.
 *
 * Fix: for maxLength <= 3 the old code returned '...' (3 characters),
 * which could exceed the requested maximum; now the text is hard-cut to
 * the requested length instead.
 */
export function truncate(text: string, maxLength: number): string {
  if (!text || text.length <= maxLength) {
    return text || ''
  }
  if (maxLength <= 3) {
    // No room for an ellipsis — hard cut to the requested length.
    return text.substring(0, Math.max(0, maxLength))
  }
  return text.substring(0, maxLength - 3) + '...'
}
// ============================================================
// DATE UTILITIES
// ============================================================
/**
 * Parse a date from a Date instance or any string accepted by `new Date()`.
 * Invalid input falls back to the current time rather than throwing.
 */
export function parseDate(dateValue: string | Date): Date {
  if (dateValue instanceof Date) {
    return dateValue
  }
  const candidate = new Date(dateValue)
  // An unparsable string produces an "Invalid Date" (NaN timestamp).
  return Number.isNaN(candidate.getTime()) ? new Date() : candidate
}
/**
 * Format a date as YYYY-MM-DD (UTC date part of the ISO-8601 string).
 */
export function formatDate(date: Date): string {
  // toISOString() is always 'YYYY-MM-DDTHH:mm:ss.sssZ'; the date part is the first 10 chars.
  return date.toISOString().slice(0, 10)
}
// ============================================================
// HTML CLEANING UTILITIES
// ============================================================
/**
 * Clean HTML content exported from Webflow.
 *
 * Drops class attributes containing Webflow's 'w-'/'wf-' prefixes,
 * strips data-* attributes, removes empty style attributes, and
 * collapses whitespace runs into single spaces.
 */
export function cleanHTML(html: string): string {
  if (!html) {
    return ''
  }
  // Applied in order; the whitespace collapse must run last.
  const substitutions: Array<[RegExp, string]> = [
    [/\sclass="[^"]*w-[^"]*"/g, ''], // Webflow 'w-*' class attributes
    [/\sclass="[^"]*wf-[^"]*"/g, ''], // Webflow 'wf-*' class attributes
    [/\sdata-[a-z-]+="[^"]*"/gi, ''], // Webflow data-* attributes
    [/\sstyle=""/g, ''], // empty style attributes
    [/\s+/g, ' '], // collapse whitespace
  ]
  let cleaned = html
  for (const [pattern, replacement] of substitutions) {
    cleaned = cleaned.replace(pattern, replacement)
  }
  return cleaned.trim()
}
/**
 * Extract plain text from HTML (for excerpts), truncated to maxLength.
 *
 * Strips <script>/<style> blocks with their contents, converts
 * block-level closing tags and <br>/<hr> to newlines, removes all
 * remaining tags, then decodes a small set of common HTML entities.
 *
 * Fix: '&amp;' is now decoded LAST. Decoding it first turned escaped
 * entities such as '&amp;lt;' into '&lt;' and then into '<' (a double
 * decode); the correct plain-text result is the literal '&lt;'.
 */
export function htmlToPlainText(html: string, maxLength: number = 200): string {
  if (!html) {
    return ''
  }
  // Remove script and style tags (including their contents)
  let text = html.replace(/<script[^>]*>[\s\S]*?<\/script>/gi, '')
  text = text.replace(/<style[^>]*>[\s\S]*?<\/style>/gi, '')
  // Replace block elements with newlines
  text = text.replace(/<\/(div|p|h[1-6]|li|tr)>/gi, '\n')
  text = text.replace(/<(br|hr)\s*\/?>/gi, '\n')
  // Remove all other tags
  text = text.replace(/<[^>]+>/g, '')
  // Decode HTML entities — '&amp;' must come last to avoid double decoding
  text = text.replace(/&nbsp;/g, ' ')
  text = text.replace(/&lt;/g, '<')
  text = text.replace(/&gt;/g, '>')
  text = text.replace(/&quot;/g, '"')
  text = text.replace(/&#39;/g, "'")
  text = text.replace(/&amp;/g, '&')
  // Collapse runs of blank lines and trim
  text = text.replace(/\n\s*\n/g, '\n\n').trim()
  return truncate(text, maxLength)
}
// ============================================================
// VALIDATION UTILITIES
// ============================================================
/**
 * Check whether a string parses as an absolute URL
 * (accepted by the WHATWG URL constructor).
 */
export function isValidUrl(value: string): boolean {
  try {
    void new URL(value)
  } catch {
    return false
  }
  return true
}
/**
 * Check whether a URL points at a common web image format.
 *
 * Fixes: only the URL's pathname is inspected, so query strings or
 * fragments ("photo.jpg?w=100", "photo.png#hero") no longer corrupt the
 * extension check, and dot-less URLs (e.g. "http://localhost/page") no
 * longer slip through via the previous 'jpg' default extension.
 */
export function isValidImageUrl(value: string): boolean {
  let pathname: string
  try {
    pathname = new URL(value).pathname.toLowerCase()
  } catch {
    return false // not a parsable URL at all
  }
  return /\.(jpg|jpeg|png|gif|webp|svg)$/.test(pathname)
}
// ============================================================
// CONFIG UTILITIES
// ============================================================
/**
 * Parse CLI arguments into a MigrationConfig.
 *
 * Recognised flags (see printHelp for the user-facing summary):
 *   -n/--dry-run, -v/--verbose, -f/--force,
 *   -c/--collection <name>, -s/--source <path>, --batch-size <number>,
 *   -h/--help (prints help and exits the process).
 *
 * Unknown flags are silently ignored; a flag that expects a value is
 * ignored when the value is missing.
 *
 * Fix: the `as any` cast on the collection name is replaced with a
 * narrow literal-union cast guarded by an explicit membership check.
 */
export function parseCliArgs(args: string[]): MigrationConfig {
  const config: MigrationConfig = {
    dryRun: false,
    verbose: false,
    collections: ['all'],
    force: false,
    batchSize: 5,
    sourcePath: './data/webflow-export.json',
  }
  // Collections accepted by --collection, besides the 'all' sentinel.
  const knownCollections = ['categories', 'posts', 'portfolio'] as const
  for (let i = 0; i < args.length; i++) {
    switch (args[i]) {
      case '--dry-run':
      case '-n':
        config.dryRun = true
        break
      case '--verbose':
      case '-v':
        config.verbose = true
        break
      case '--force':
      case '-f':
        config.force = true
        break
      case '--collection':
      case '-c': {
        const name = args[i + 1]
        if (name) {
          i++ // consume the value
          if (name === 'all') {
            config.collections = ['all']
          } else if ((knownCollections as readonly string[]).includes(name)) {
            // Cast is safe: membership was just checked against knownCollections.
            config.collections = [name as (typeof knownCollections)[number]]
          }
        }
        break
      }
      case '--source':
      case '-s': {
        const path = args[i + 1]
        if (path) {
          i++
          config.sourcePath = path
        }
        break
      }
      case '--batch-size': {
        const size = args[i + 1]
        if (size) {
          i++
          // Fall back to the default of 5 on NaN/0 (0 is not a usable batch size).
          config.batchSize = parseInt(size, 10) || 5
        }
        break
      }
      case '--help':
      case '-h':
        printHelp()
        process.exit(0)
    }
  }
  return config
}
/**
 * Print the CLI usage/help text to stdout.
 *
 * NOTE(review): relies on a module-level `colors` map of ANSI escape
 * sequences defined elsewhere in this file; the template below is
 * user-facing output and must stay in sync with parseCliArgs.
 */
export function printHelp(): void {
  console.log(`
${colors.bright}Webflow to Payload CMS Migration Script${colors.reset}
${colors.cyan}Usage:${colors.reset}
pnpm tsx scripts/migration/migrate.ts [options]
${colors.cyan}Options:${colors.reset}
-n, --dry-run Run without making changes (preview mode)
-v, --verbose Show detailed logging output
-f, --force Overwrite existing items (skip deduplication)
-c, --collection <name> Specific collection to migrate (categories|posts|portfolio|all)
-s, --source <path> Path to HTML/JSON export file
--batch-size <number> Number of items to process in parallel (default: 5)
-h, --help Show this help message
${colors.cyan}Examples:${colors.reset}
pnpm tsx scripts/migration/migrate.ts --dry-run --verbose
pnpm tsx scripts/migration/migrate.ts --collection posts
pnpm tsx scripts/migration/migrate.ts --source ./data/export.json
${colors.cyan}Environment Variables:${colors.reset}
PAYLOAD_CMS_URL Payload CMS URL (default: http://localhost:3000)
MIGRATION_ADMIN_EMAIL Admin user email for authentication
MIGRATION_ADMIN_PASSWORD Admin user password
`)
}

View File

@@ -0,0 +1,178 @@
import type { GlobalConfig } from 'payload'
import { adminOnly } from '../access/adminOnly'
import { auditGlobalChange } from '../collections/Audit/hooks/auditHooks'
import { revalidateHome } from './hooks/revalidateHome'
/**
 * `home` global — singleton content powering the public landing page.
 *
 * Sections: Hero (headlines, background videos, fallback image, logo),
 * service features (max 4 cards), portfolio preview, and a CTA banner.
 * Publicly readable; only admins may update. Each update runs the
 * afterChange hooks below (frontend cache revalidation + audit logging).
 */
export const Home: GlobalConfig = {
  slug: 'home',
  access: {
    // Public read: the frontend fetches this anonymously.
    read: () => true,
    update: adminOnly,
  },
  fields: [
    // Hero Section
    {
      name: 'heroHeadline',
      type: 'text',
      required: true,
      defaultValue: '創造企業更多發展的可能性\n是我們的使命',
      admin: {
        description: '首頁 Hero 主標題(支援換行)',
      },
    },
    {
      name: 'heroSubheadline',
      type: 'text',
      required: true,
      defaultValue: "It's our destiny to create possibilities for your business.",
      admin: {
        description: '首頁 Hero 副標題(英文)',
      },
    },
    {
      name: 'heroDesktopVideo',
      type: 'upload',
      relationTo: 'media',
      required: false,
      admin: {
        description: '桌面版 Hero 背景影片',
      },
    },
    {
      name: 'heroMobileVideo',
      type: 'upload',
      relationTo: 'media',
      required: false,
      admin: {
        description: '手機版 Hero 背景影片(建議較小檔案以節省流量)',
      },
    },
    {
      name: 'heroFallbackImage',
      type: 'upload',
      relationTo: 'media',
      required: false,
      admin: {
        description: '影片載入失敗時的備用圖片',
      },
    },
    {
      name: 'heroLogo',
      type: 'upload',
      relationTo: 'media',
      required: false,
      admin: {
        description: 'Hero 區域顯示的 Logo可選',
      },
    },
    // Service Features Section — up to four icon/title/description cards
    {
      name: 'serviceFeatures',
      type: 'array',
      fields: [
        {
          name: 'icon',
          type: 'text',
          required: true,
          admin: {
            description: '圖示(支援 SVG 或 Emoji例如🎯 或 <svg>...</svg>',
          },
        },
        {
          name: 'title',
          type: 'text',
          required: true,
        },
        {
          name: 'description',
          type: 'textarea',
          required: true,
        },
        {
          // Optional link wrapped in a group so more link fields can be added later.
          name: 'link',
          type: 'group',
          fields: [
            {
              name: 'url',
              type: 'text',
              admin: {
                description: '連結 URL可選',
              },
            },
          ],
        },
      ],
      maxRows: 4,
      admin: {
        initCollapsed: true,
      },
    },
    // Portfolio Preview Section
    {
      name: 'portfolioSection',
      type: 'group',
      fields: [
        {
          name: 'headline',
          type: 'text',
          defaultValue: '精選案例',
          required: true,
        },
        {
          name: 'subheadline',
          type: 'text',
          defaultValue: '探索我們為客戶打造的優質網站',
          required: false,
        },
        {
          name: 'itemsToShow',
          type: 'number',
          defaultValue: 3,
          min: 1,
          max: 6,
          admin: {
            description: '顯示多少個作品項目',
          },
        },
      ],
    },
    // CTA Section
    {
      name: 'ctaSection',
      type: 'group',
      fields: [
        {
          name: 'headline',
          type: 'text',
          defaultValue: '準備好開始新的旅程了嗎',
          required: true,
        },
        {
          name: 'description',
          type: 'textarea',
          defaultValue: '讓我們一起打造您的數位成功故事',
          required: false,
        },
        {
          name: 'buttonText',
          type: 'text',
          defaultValue: '聯絡我們',
          required: true,
        },
        {
          name: 'buttonLink',
          type: 'text',
          defaultValue: '/contact-us',
          required: true,
        },
      ],
    },
  ],
  hooks: {
    // Runs on every save: revalidate the frontend cache, then write an audit entry.
    afterChange: [revalidateHome, auditGlobalChange('home')],
  },
}

View File

@@ -0,0 +1,14 @@
import type { GlobalAfterChangeHook } from 'payload'
import { revalidateTag } from 'next/cache'
/**
 * afterChange hook for the `home` global: invalidates the Next.js cache
 * tag so the frontend refetches the updated content.
 */
export const revalidateHome: GlobalAfterChangeHook = async ({ doc, req }) => {
  // Callers (e.g. bulk migrations) can opt out via context.disableRevalidate.
  if (!req.context.disableRevalidate) {
    req.payload.logger.info(`Revalidating home`)
    revalidateTag('global_home')
  }
  return doc
}

View File

@@ -8,13 +8,16 @@ import { logDocumentChange } from '@/utilities/auditLogger'
*/
export const auditChange =
(collection: string): AfterChangeHook =>
async ({ doc, req }) => {
async ({ doc, req, context }) => {
// 跳過 audit 集合本身以避免無限循環
if (collection === 'audit') return doc
// Determine operation from context or default to 'update'
const operation = (context?.operation as 'create' | 'update' | 'delete') || 'update'
await logDocumentChange(
req,
operation as 'create' | 'update' | 'delete',
operation,
collection,
doc.id as string,
(doc.title || doc.name || String(doc.id)) as string,

View File

@@ -111,7 +111,7 @@ export const Posts: CollectionConfig<'posts'> = {
},
}),
label: false,
required: true,
required: false, // Temporarily disabled for migration
},
{
name: 'excerpt',

View File

@@ -103,10 +103,12 @@ export interface Config {
globals: {
header: Header;
footer: Footer;
home: Home;
};
globalsSelect: {
header: HeaderSelect<false> | HeaderSelect<true>;
footer: FooterSelect<false> | FooterSelect<true>;
home: HomeSelect<false> | HomeSelect<true>;
};
locale: null;
user: User & {
@@ -220,7 +222,7 @@ export interface Post {
* Facebook/LINE 分享時顯示的預覽圖,建議 1200x630px
*/
ogImage?: (string | null) | Media;
content: {
content?: {
root: {
type: string;
children: {
@@ -234,7 +236,7 @@ export interface Post {
version: number;
};
[k: string]: unknown;
};
} | null;
/**
* 顯示在文章列表頁,建議 150-200 字
*/
@@ -1410,6 +1412,70 @@ export interface Footer {
updatedAt?: string | null;
createdAt?: string | null;
}
/**
 * Document shape of the `home` global (landing-page content).
 *
 * This interface was referenced by `Config`'s JSON-Schema
 * via the `definition` "home".
 *
 * NOTE(review): this appears to be an auto-generated payload-types file —
 * regenerate it from the config rather than editing by hand.
 */
export interface Home {
  id: string;
  /**
   * Homepage hero main headline (supports line breaks)
   */
  heroHeadline: string;
  /**
   * Homepage hero subheadline (English)
   */
  heroSubheadline: string;
  /**
   * Desktop hero background video
   */
  heroDesktopVideo?: (string | null) | Media;
  /**
   * Mobile hero background video (smaller file recommended to save bandwidth)
   */
  heroMobileVideo?: (string | null) | Media;
  /**
   * Fallback image shown when the video fails to load
   */
  heroFallbackImage?: (string | null) | Media;
  /**
   * Logo displayed in the hero area (optional)
   */
  heroLogo?: (string | null) | Media;
  serviceFeatures?:
    | {
        /**
         * Icon (SVG markup or emoji, e.g. 🎯 or <svg>...</svg>)
         */
        icon: string;
        title: string;
        description: string;
        link?: {
          /**
           * Link URL (optional)
           */
          url?: string | null;
        };
        id?: string | null;
      }[]
    | null;
  portfolioSection: {
    headline: string;
    subheadline?: string | null;
    /**
     * How many portfolio items to show
     */
    itemsToShow?: number | null;
  };
  ctaSection: {
    headline: string;
    description?: string | null;
    buttonText: string;
    buttonLink: string;
  };
  updatedAt?: string | null;
  createdAt?: string | null;
}
/**
* This interface was referenced by `Config`'s JSON-Schema
* via the `definition` "header_select".
@@ -1470,6 +1536,49 @@ export interface FooterSelect<T extends boolean = true> {
createdAt?: T;
globalType?: T;
}
/**
 * Field-selection shape for the `home` global (Payload `select` queries).
 *
 * This interface was referenced by `Config`'s JSON-Schema
 * via the `definition` "home_select".
 *
 * NOTE(review): this appears to be an auto-generated payload-types file —
 * regenerate it from the config rather than editing by hand.
 */
export interface HomeSelect<T extends boolean = true> {
  heroHeadline?: T;
  heroSubheadline?: T;
  heroDesktopVideo?: T;
  heroMobileVideo?: T;
  heroFallbackImage?: T;
  heroLogo?: T;
  serviceFeatures?:
    | T
    | {
        icon?: T;
        title?: T;
        description?: T;
        link?:
          | T
          | {
              url?: T;
            };
        id?: T;
      };
  portfolioSection?:
    | T
    | {
        headline?: T;
        subheadline?: T;
        itemsToShow?: T;
      };
  ctaSection?:
    | T
    | {
        headline?: T;
        description?: T;
        buttonText?: T;
        buttonLink?: T;
      };
  updatedAt?: T;
  createdAt?: T;
  globalType?: T;
}
/**
* This interface was referenced by `Config`'s JSON-Schema
* via the `definition` "TaskCleanup-audit-logs".

View File

@@ -14,6 +14,7 @@ import { Portfolio } from './collections/Portfolio'
import { Posts } from './collections/Posts'
import { Users } from './collections/Users'
import { Footer } from './Footer/config'
import { Home } from './Home/config'
import { Header } from './Header/config'
import { plugins } from './plugins'
import { defaultLexical } from '@/fields/defaultLexical'
@@ -71,7 +72,7 @@ export default buildConfig({
'http://localhost:4321', // Astro dev server
'http://localhost:8788', // Wrangler Pages dev server
].filter(Boolean),
globals: [Header, Footer],
globals: [Header, Footer, Home],
email: resendAdapter({
defaultFromAddress: 'dev@resend.com',
defaultFromName: '恩群數位行銷',