// AI Image Upscaler API server (deployed as a Hugging Face Space).
import express from 'express';
import multer from 'multer';
import cors from 'cors';
import * as tf from '@tensorflow/tfjs';
import '@tensorflow/tfjs-backend-wasm';
import '@tensorflow/tfjs-backend-cpu';
import { createCanvas, loadImage } from 'canvas';
import { createRequire } from 'module';
// For importing CommonJS packages from this ES module.
const require = createRequire(import.meta.url);
// Upscaler constructor is loaded lazily via dynamic import (see initializeUpscaler);
// kept `let` so it can be assigned after the first import resolves.
let Upscaler;
const app = express();
const PORT = process.env.PORT || 7860;
// Middleware: allow cross-origin requests and accept large JSON/url-encoded
// bodies (base64 images can be several MB, hence the 50mb limit).
app.use(cors());
app.use(express.json({ limit: '50mb' }));
app.use(express.urlencoded({ extended: true, limit: '50mb' }));
// Serve static files from public directory
app.use(express.static('public'));
// Multer upload handling: buffers stay in memory (no temp files), uploads are
// capped at 10MB, and only image/* MIME types are accepted.
const upload = multer({
  storage: multer.memoryStorage(),
  limits: {
    fileSize: 10 * 1024 * 1024, // 10MB limit
  },
  fileFilter: (req, file, cb) => {
    const isImage = file.mimetype.startsWith('image/');
    cb(isImage ? null : new Error('Only image files are allowed'), isImage);
  },
});
// Shared upscaler instance, created lazily on first use.
let upscalerInstance = null;
/**
 * Resolve the ESRGAN model definition for a scale factor and model family.
 *
 * The original switch duplicated the import/select logic per family and
 * declared `const`s inside unbraced `case` clauses (no-case-declarations);
 * this version looks the package up once and imports it dynamically.
 *
 * @param {number} scale - Upscaling factor; 2 or 3 select x2/x3, anything else falls through to x4 (as before).
 * @param {string} modelType - 'esrgan-slim' | 'esrgan-medium' | 'esrgan-thick'; unknown values fall back to esrgan-slim (matches the original default branch).
 * @returns {Promise<object>} The model definition exported by the package.
 */
async function getModelForScaleAndType(scale, modelType) {
  const packagesByType = {
    'esrgan-slim': '@upscalerjs/esrgan-slim',
    'esrgan-medium': '@upscalerjs/esrgan-medium',
    'esrgan-thick': '@upscalerjs/esrgan-thick',
  };
  // Object.hasOwn guards against prototype keys if modelType is ever unvalidated.
  const pkg = Object.hasOwn(packagesByType, modelType)
    ? packagesByType[modelType]
    : '@upscalerjs/esrgan-slim';
  const { x2, x3, x4 } = await import(pkg);
  if (scale === 2) return x2;
  if (scale === 3) return x3;
  return x4;
}
/**
 * Build a new Upscaler for the requested scale/model, store it in the shared
 * `upscalerInstance` global, and return it. Rethrows on failure so callers
 * can surface the error.
 *
 * @param {number} [scale=2] - Upscaling factor (2, 3, or 4).
 * @param {string} [modelType='esrgan-slim'] - Model family name.
 * @returns {Promise<object>} The initialized Upscaler instance.
 */
async function initializeUpscaler(scale = 2, modelType = 'esrgan-slim') {
  try {
    console.log(`Initializing upscaler with scale ${scale}x and model ${modelType}...`);
    // Lazily resolve the Upscaler constructor on first use (dynamic ESM import).
    if (!Upscaler) {
      Upscaler = (await import('upscaler')).default;
    }
    const selectedModel = await getModelForScaleAndType(scale, modelType);
    upscalerInstance = new Upscaler({ model: selectedModel });
    console.log('Upscaler initialized successfully');
    return upscalerInstance;
  } catch (error) {
    console.error('Failed to initialize upscaler:', error);
    throw error;
  }
}
/**
 * Encode a binary buffer as a base64 data URL.
 *
 * @param {Buffer} buffer - Raw image bytes.
 * @param {string} [mimeType='image/png'] - MIME type to embed in the URL.
 * @returns {string} `data:<mime>;base64,<payload>`.
 */
function bufferToDataURL(buffer, mimeType = 'image/png') {
  return `data:${mimeType};base64,${buffer.toString('base64')}`;
}
// Health check / service discovery endpoint.
app.get('/', (req, res) => {
  const info = {
    status: 'ok',
    message: 'AI Image Upscaler API',
    backend: tf.getBackend(),
    version: '1.0.0',
    endpoints: {
      upscale: 'POST /upscale',
      health: 'GET /',
    },
  };
  res.json(info);
});
// Main upscale endpoint.
//
// BUG FIX: the original only initialized the upscaler when `upscalerInstance`
// was null, so the scale/model of the FIRST request was cached forever and
// later requests asking for a different scale or model silently got the old
// one. We now tag the instance with its config key and reinitialize (disposing
// the previous instance) whenever the requested configuration changes.
app.post('/upscale', upload.single('image'), async (req, res) => {
  try {
    if (!req.file) {
      return res.status(400).json({ error: 'No image file provided' });
    }
    const { scale = 2, modelType = 'esrgan-slim', patchSize = 128, padding = 8 } = req.body;
    // Parse once with an explicit radix; form fields arrive as strings.
    const parsedScale = Number.parseInt(scale, 10);
    const parsedPatchSize = Number.parseInt(patchSize, 10);
    const parsedPadding = Number.parseInt(padding, 10);
    // Validate parameters
    const validScales = [2, 3, 4];
    const validModels = ['esrgan-slim', 'esrgan-medium', 'esrgan-thick'];
    if (!validScales.includes(parsedScale)) {
      return res.status(400).json({ error: 'Invalid scale. Must be 2, 3, or 4' });
    }
    if (!validModels.includes(modelType)) {
      return res.status(400).json({ error: 'Invalid model type' });
    }
    // Reject NaN/non-positive tiling parameters instead of passing them to the model.
    if (!Number.isInteger(parsedPatchSize) || parsedPatchSize <= 0) {
      return res.status(400).json({ error: 'Invalid patchSize. Must be a positive integer' });
    }
    if (!Number.isInteger(parsedPadding) || parsedPadding < 0) {
      return res.status(400).json({ error: 'Invalid padding. Must be a non-negative integer' });
    }
    console.log(`Processing image with scale ${parsedScale}x, model ${modelType}`);
    // (Re)initialize the upscaler when absent or configured differently.
    const configKey = `${parsedScale}:${modelType}`;
    if (!upscalerInstance || upscalerInstance.__configKey !== configKey) {
      if (upscalerInstance) {
        // Free the old model's tensors before replacing it.
        try {
          upscalerInstance.dispose();
        } catch (disposeError) {
          console.warn('Error disposing previous upscaler:', disposeError);
        }
      }
      await initializeUpscaler(parsedScale, modelType);
      upscalerInstance.__configKey = configKey;
    }
    // Convert image buffer to data URL
    const inputDataURL = bufferToDataURL(req.file.buffer, req.file.mimetype);
    // Perform upscaling
    console.log('Starting upscaling...');
    const startTime = Date.now();
    const result = await upscalerInstance.upscale(inputDataURL, {
      output: 'base64',
      patchSize: parsedPatchSize,
      padding: parsedPadding,
      awaitNextFrame: true,
    });
    const processingTime = Date.now() - startTime;
    console.log(`Upscaling completed in ${processingTime}ms`);
    // Return the upscaled image
    res.json({
      success: true,
      result: result,
      metadata: {
        scale: parsedScale,
        modelType: modelType,
        patchSize: parsedPatchSize,
        padding: parsedPadding,
        processingTime: processingTime,
        backend: tf.getBackend(),
      },
    });
  } catch (error) {
    console.error('Upscaling error:', error);
    res.status(500).json({
      error: 'Failed to upscale image',
      message: error.message,
      backend: tf.getBackend(),
    });
  }
});
// Express error-handling middleware: translate multer size-limit errors into
// a 400, everything else into a generic 500.
app.use((error, req, res, next) => {
  const isSizeLimit =
    error instanceof multer.MulterError && error.code === 'LIMIT_FILE_SIZE';
  if (isSizeLimit) {
    return res.status(400).json({ error: 'File too large. Maximum size is 10MB' });
  }
  console.error('Unhandled error:', error);
  res.status(500).json({ error: 'Internal server error' });
});
/**
 * Initialize TensorFlow.js, preferring the WASM backend and falling back to
 * the pure-CPU backend when WASM is unavailable.
 *
 * @returns {Promise<boolean>} true if any backend came up, false otherwise.
 */
async function initializeTensorFlow() {
  try {
    console.log('Initializing TensorFlow.js...');
    // Preferred: WASM backend.
    try {
      await tf.setBackend('wasm');
      await tf.ready();
      console.log('TensorFlow.js initialized with WASM backend');
      console.log('Current backend:', tf.getBackend());
      return true;
    } catch (wasmError) {
      console.warn('WASM backend failed, trying CPU backend:', wasmError.message);
    }
    // Fallback: CPU backend.
    try {
      await tf.setBackend('cpu');
      await tf.ready();
      console.log('TensorFlow.js initialized with CPU backend');
      console.log('Current backend:', tf.getBackend());
      return true;
    } catch (cpuError) {
      console.error('Both WASM and CPU backends failed:', cpuError.message);
      return false;
    }
  } catch (error) {
    console.error('Failed to initialize TensorFlow.js:', error);
    return false;
  }
}
/**
 * Bring up TensorFlow.js and then start listening; exits the process on
 * unrecoverable initialization failure.
 */
async function startServer() {
  try {
    const ready = await initializeTensorFlow();
    if (!ready) {
      console.error('Failed to initialize TensorFlow.js. Exiting...');
      process.exit(1);
    }
    // Bind on all interfaces so the container port mapping works.
    app.listen(PORT, '0.0.0.0', () => {
      console.log(`🚀 Upscaler API server running on port ${PORT}`);
      console.log(`📊 TensorFlow.js backend: ${tf.getBackend()}`);
      console.log(`🔗 Health check: http://localhost:${PORT}/`);
    });
  } catch (error) {
    console.error('Failed to start server:', error);
    process.exit(1);
  }
}
// Graceful shutdown: release model tensors (if any) before exiting.
process.on('SIGTERM', () => {
  console.log('Received SIGTERM, shutting down gracefully...');
  try {
    upscalerInstance?.dispose();
  } catch (error) {
    console.warn('Error disposing upscaler:', error);
  }
  process.exit(0);
});
// Start the server
startServer();