import rl from 'node:readline';
import { StableDiffusionApi } from 'stable-diffusion-api';
import sharp from 'sharp';

const PROMPT = "extremely detailed cinematic close up photo of an (Nikolay Valuev:1.3) as ethereal neural network organism, anatomical face, biomechanical details";

// Client for a locally running AUTOMATIC1111 web UI exposing the txt2img API.
const api = new StableDiffusionApi({
  host: "127.0.0.1",
  port: 7860,
  protocol: "http",
  defaultSampler: "DPM++ 2M Karras",
  defaultStepCount: 22,
});

await api.setModel("deliberate_v3");

// Poll the progress endpoint and redraw a single status line until the job queue is empty.
function printProgress() {
  const progressInterval = setInterval(async () => {
    const response = await api.getProgress();
    if (response.progress === 0.0 && response.state.job_count === 0) {
      clearInterval(progressInterval);
    }
    rl.cursorTo(process.stdout, 0);
    rl.clearLine(process.stdout, 0);
    process.stdout.write(`[WAIT]: progress = ${response.progress.toFixed(2)}, jobs: ${response.state.job_count}`);
  }, 200);
}

async function predict(prompt) {
  printProgress();
  return api.txt2img({
    prompt: prompt,
    negative_prompt: '[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry',
    batch_size: 4,
    cfg_scale: 7,
    width: 640,
    height: 640,
    enable_hr: false,
    hr_resize_x: 1280,
    hr_resize_y: 1280,
    hr_upscaler: "4x_NMKD-Siax_200k",
    hr_second_pass_steps: 8,
    denoising_strength: 0.36,
    seed: -1,
  });
}

// Generate a batch of four images and collect them as PNG buffers.
const results = [];
const prediction = await predict(PROMPT);
for (const result of prediction.images) {
  const image = await result.png().toBuffer();
  results.push(image);
}

// Black 1280x1280 canvas onto which the four 640x640 results are composited.
const canvas = await sharp({
  create: {
    width: 1280,
    height: 1280,
    channels: 3,
    background: { r: 0, g: 0, b: 0 },
  },
}).png().toBuffer();

// Place one image in each corner to build a 2x2 grid, then save it as a JPEG.
const result = sharp(canvas).composite([
  { input: results[0], gravity: 'northwest' },
  { input: results[1], gravity: 'northeast' },
  { input: results[2], gravity: 'southwest' },
  { input: results[3], gravity: 'southeast' },
]);

await result.jpeg().toFile('result.jpeg');