'use client';

import { useState } from 'react';
import { useRouter } from 'next/navigation';
import ImportLayout from '../../components/layout/ImportLayout';
import { Input } from '../../components/ui/Input';
import Button from '../../components/ui/Button';

export default function ImportFromUrlPage() {
  const [importUrl, setImportUrl] = useState('');
  const [scraping, setScraping] = useState(false);
  const [errors, setErrors] = useState<Record<string, string>>({});
  const router = useRouter();

  const handleImportFromUrl = async () => {
    if (!importUrl.trim()) {
      setErrors({ importUrl: 'URL is required' });
      return;
    }

    setScraping(true);
    setErrors({});

    try {
      const response = await fetch('/scrape/story', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({ url: importUrl }),
      });

      if (!response.ok) {
        const errorData = await response.json();
        throw new Error(errorData.error || 'Failed to scrape story');
      }

      const scrapedStory = await response.json();

      // Redirect to the add-story page with the scraped data pre-filled
      const queryParams = new URLSearchParams({
        from: 'url-import',
        title: scrapedStory.title || '',
        summary: scrapedStory.summary || '',
        author: scrapedStory.author || '',
        sourceUrl: scrapedStory.sourceUrl || importUrl,
        tags: JSON.stringify(scrapedStory.tags || []),
        content: scrapedStory.content || ''
      });

      router.push(`/add-story?${queryParams.toString()}`);
    } catch (error: any) {
      console.error('Failed to import story:', error);
      setErrors({ importUrl: error.message });
    } finally {
      setScraping(false);
    }
  };

  return (
    <ImportLayout>
      <h1>Import Story from URL</h1>

      <p>
        Enter a URL from a supported story site to automatically extract the
        story content, title, author, and other metadata. After importing,
        you&apos;ll be able to review and edit the data before saving.
      </p>

      {/* URL input, disabled while a scrape is in progress */}
      <Input
        value={importUrl}
        onChange={(e) => setImportUrl(e.target.value)}
        placeholder="https://example.com/story-url"
        error={errors.importUrl}
        disabled={scraping}
      />

      {/* Triggers the scrape and redirect flow defined above */}
      <Button onClick={handleImportFromUrl} disabled={scraping}>
        {scraping ? 'Importing…' : 'Import Story'}
      </Button>

      <h2>Supported Sites:</h2>
      <p>
        Archive of Our Own, DeviantArt, FanFiction.Net, Literotica, Royal Road,
        Wattpad, and more
      </p>
    </ImportLayout>
  );
}