fix: registry number re-allocation on company change + extreme PDF large file support

- Registratura: re-allocate the registry number when the company/direction
  changes on update; recalculate the old company's sequence counter from the
  actual entries (see the sketch below)
- Extreme PDF: stream the request body to a temp file instead of req.formData(),
  to support large files (see the client example below)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
AI Assistant
2026-03-11 14:05:13 +02:00
parent ed504bd1de
commit 1c51236c31
3 changed files with 118 additions and 13 deletions (this file: +50 -10)
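The Registratura change lives in the two files not shown below. Here is a minimal sketch of the "recalculate from actual entries" idea, assuming a Prisma-style setup; the model and field names (registryEntry, registryCounter, companyId, lastNumber) are illustrative assumptions, not the actual schema:

// Hypothetical sketch: rebuild a company's sequence counter from the
// entries that actually exist, instead of trusting the stored value.
// All model/field names below are assumptions for illustration.
import { PrismaClient } from "@prisma/client";

const prisma = new PrismaClient();

async function recalcCompanyCounter(companyId: string, year: number) {
  // Highest registry number actually in use for this company/year
  const max = await prisma.registryEntry.aggregate({
    where: { companyId, year },
    _max: { number: true },
  });
  // Reset the counter so the next allocation continues from real data
  await prisma.registryCounter.update({
    where: { companyId_year: { companyId, year } },
    data: { lastNumber: max._max.number ?? 0 },
  });
}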
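On the client side, the new streaming route can be exercised with a plain fetch upload. The "fileInput" field name comes from the replaced code below; the endpoint path is an assumption:

// Usage sketch: the "fileInput" field name matches the old formData.get()
// call; the route path "/api/pdf/extreme" is assumed, not confirmed here.
async function uploadLargePdf(file: File): Promise<Blob> {
  const form = new FormData();
  form.append("fileInput", file); // single file part, as the server parser expects

  const res = await fetch("/api/pdf/extreme", { method: "POST", body: form });
  if (!res.ok) throw new Error(`Extreme PDF failed: ${res.status}`);
  return res.blob(); // recompressed PDF
}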
@@ -1,10 +1,13 @@
 import { NextRequest, NextResponse } from "next/server";
-import { writeFile, readFile, unlink, mkdir } from "fs/promises";
+import { writeFile, readFile, unlink, mkdir, stat } from "fs/promises";
+import { createWriteStream } from "fs";
 import { execFile } from "child_process";
 import { promisify } from "util";
 import { randomUUID } from "crypto";
 import { join } from "path";
 import { tmpdir } from "os";
+import { Readable } from "stream";
+import { pipeline } from "stream/promises";

 const execFileAsync = promisify(execFile);
@@ -97,23 +100,60 @@ async function cleanup(dir: string) {
 export async function POST(req: NextRequest) {
   const tmpDir = join(tmpdir(), `pdf-extreme-${randomUUID()}`);
   try {
-    const formData = await req.formData();
-    const fileBlob = formData.get("fileInput") as Blob | null;
-    if (!fileBlob) {
+    // Stream body directly to temp file — avoids req.formData() size limit
+    // that causes "Failed to parse body as FormData" on large files
+    await mkdir(tmpDir, { recursive: true });
+    const rawPath = join(tmpDir, "raw-upload");
+    const inputPath = join(tmpDir, "input.pdf");
+    const gsOutputPath = join(tmpDir, "gs-output.pdf");
+    const finalOutputPath = join(tmpDir, "final.pdf");
+
+    if (!req.body) {
       return NextResponse.json(
         { error: "Lipsește fișierul PDF." },
         { status: 400 },
       );
     }

-    const originalSize = fileBlob.size;
-    await mkdir(tmpDir, { recursive: true });
+    // Write the raw multipart body to disk
+    const nodeStream = Readable.fromWeb(req.body as import("stream/web").ReadableStream);
+    await pipeline(nodeStream, createWriteStream(rawPath));

-    const inputPath = join(tmpDir, "input.pdf");
-    const gsOutputPath = join(tmpDir, "gs-output.pdf");
-    const finalOutputPath = join(tmpDir, "final.pdf");
+    // Extract the PDF from multipart: find the double CRLF after headers,
+    // then read until the boundary marker before the end
+    const rawBuf = await readFile(rawPath);
+    const headerEnd = rawBuf.indexOf(Buffer.from("\r\n\r\n"));
+    if (headerEnd === -1) {
+      return NextResponse.json(
+        { error: "Lipsește fișierul PDF." },
+        { status: 400 },
+      );
+    }

-    await writeFile(inputPath, Buffer.from(await fileBlob.arrayBuffer()));
+    // Extract boundary from Content-Type header
+    const contentType = req.headers.get("content-type") || "";
+    const boundaryMatch = contentType.match(/boundary=(?:"([^"]+)"|([^\s;]+))/);
+    const boundary = boundaryMatch?.[1] ?? boundaryMatch?.[2] ?? "";
+    if (!boundary) {
+      return NextResponse.json(
+        { error: "Lipsește fișierul PDF." },
+        { status: 400 },
+      );
+    }
+
+    // File content starts after first double CRLF, ends before closing boundary
+    const closingBoundary = Buffer.from(`\r\n--${boundary}`);
+    const fileStart = headerEnd + 4;
+    const fileEnd = rawBuf.indexOf(closingBoundary, fileStart);
+    const pdfData = fileEnd !== -1 ? rawBuf.subarray(fileStart, fileEnd) : rawBuf.subarray(fileStart);
+    await writeFile(inputPath, pdfData);
+    const originalSize = pdfData.length;
+
+    // Clean up raw file early to free disk space
+    await unlink(rawPath).catch(() => {});

     // Step 1: Ghostscript — aggressive image recompression + downsampling
     try {