perf: separate blob storage for registratura attachments

Root cause: even with SQL-level stripping, PostgreSQL must TOAST-decompress
entire multi-MB JSONB values from disk before any processing. For 5 entries
with PDF attachments (25-50MB total), this takes several seconds.

Fix: store base64 attachment data in separate namespace 'registratura-blobs'.
Main entries are inherently small (~1-2KB). List queries never touch heavy data.

Changes:
- registry-service.ts: extractBlobs/mergeBlobs split base64 on save/load,
  migrateEntryBlobs() one-time migration for existing entries
- use-registry.ts: dual namespace (registratura + registratura-blobs),
  migration runs on first mount
- registratura-module.tsx: removed useContacts/useTags hooks that triggered
  2 unnecessary API fetches on page load (write-only ops use direct storage)

Before: 3 API calls on mount, one reading 25-50MB from PostgreSQL
After: 1 API call on mount, reading ~5-10KB total
This commit is contained in:
AI Assistant
2026-02-27 23:35:04 +02:00
parent 8385041bb0
commit f8c19bb5b4
3 changed files with 227 additions and 53 deletions
@@ -24,8 +24,8 @@ import {
DialogFooter,
} from "@/shared/components/ui/dialog";
import { useRegistry } from "../hooks/use-registry";
import { useContacts } from "@/modules/address-book/hooks/use-contacts";
import { useTags } from "@/core/tagging";
import { useStorage, useStorageService } from "@/core/storage";
import { v4 as uuid } from "uuid";
import { RegistryFilters } from "./registry-filters";
import { RegistryTable } from "./registry-table";
import { RegistryEntryForm } from "./registry-entry-form";
@@ -55,15 +55,16 @@ export function RegistraturaModule() {
removeDeadline,
} = useRegistry();
const { addContact } = useContacts();
const { createTag } = useTags("document-type");
// Direct storage for write-only operations — avoids loading all contacts/tags on mount
const contactStorage = useStorage("address-book");
const tagStorageService = useStorageService();
const [viewMode, setViewMode] = useState<ViewMode>("list");
const [editingEntry, setEditingEntry] = useState<RegistryEntry | null>(null);
const [closingId, setClosingId] = useState<string | null>(null);
const [linkCheckId, setLinkCheckId] = useState<string | null>(null);
// ── Bidirectional Address Book integration ──
// ── Bidirectional Address Book integration (write-only, no eager fetch) ──
const handleCreateContact = useCallback(
async (data: {
name: string;
@@ -71,7 +72,9 @@ export function RegistraturaModule() {
email: string;
}): Promise<AddressContact | undefined> => {
try {
const contact = await addContact({
const now = new Date().toISOString();
const contact: AddressContact = {
id: uuid(),
name: data.name,
company: "",
type: "collaborator",
@@ -88,30 +91,36 @@ export function RegistraturaModule() {
tags: [],
notes: "Creat automat din Registratură",
visibility: "all",
});
createdAt: now,
updatedAt: now,
};
await contactStorage.set(`contact:${contact.id}`, contact);
return contact;
} catch {
return undefined;
}
},
[addContact],
[contactStorage],
);
// ── Bidirectional Tag Manager integration ──
// ── Bidirectional Tag Manager integration (write-only, no eager fetch) ──
const handleCreateDocType = useCallback(
async (label: string) => {
try {
await createTag({
const tagId = uuid();
await tagStorageService.set("tags", tagId, {
id: tagId,
label,
category: "document-type",
scope: "global",
color: "#64748b",
createdAt: new Date().toISOString(),
});
} catch {
// tag may already exist — ignore
}
},
[createTag],
[tagStorageService],
);
const handleAdd = async (
+35 -34
View File
@@ -1,6 +1,6 @@
"use client";
import { useState, useEffect, useCallback } from "react";
import { useState, useEffect, useCallback, useRef } from "react";
import { useStorage } from "@/core/storage";
import { v4 as uuid } from "uuid";
import type {
@@ -17,6 +17,7 @@ import {
saveEntry,
deleteEntry,
generateRegistryNumber,
migrateEntryBlobs,
} from "../services/registry-service";
import {
createTrackedDeadline,
@@ -34,6 +35,7 @@ export interface RegistryFilters {
export function useRegistry() {
const storage = useStorage("registratura");
const blobStorage = useStorage("registratura-blobs");
const [entries, setEntries] = useState<RegistryEntry[]>([]);
const [loading, setLoading] = useState(true);
const [filters, setFilters] = useState<RegistryFilters>({
@@ -43,6 +45,7 @@ export function useRegistry() {
documentType: "all",
company: "all",
});
const migrationRan = useRef(false);
const refresh = useCallback(async () => {
setLoading(true);
@@ -51,17 +54,23 @@ export function useRegistry() {
setLoading(false);
}, [storage]);
// On mount: run migration (once), then load entries
// eslint-disable-next-line react-hooks/set-state-in-effect
useEffect(() => {
refresh();
}, [refresh]);
const init = async () => {
if (!migrationRan.current) {
migrationRan.current = true;
await migrateEntryBlobs(storage, blobStorage);
}
await refresh();
};
init();
}, [refresh, storage, blobStorage]);
const addEntry = useCallback(
async (
data: Omit<RegistryEntry, "id" | "number" | "createdAt" | "updatedAt">,
) => {
// Fetch fresh entries to prevent duplicate numbers from stale state.
// This single call replaces what was previously list() + N×get().
const freshEntries = await getAllEntries(storage);
const now = new Date().toISOString();
const number = generateRegistryNumber(
@@ -77,12 +86,11 @@ export function useRegistry() {
createdAt: now,
updatedAt: now,
};
await saveEntry(storage, entry);
// Update local state directly to avoid a second full fetch
await saveEntry(storage, blobStorage, entry);
setEntries((prev) => [entry, ...prev]);
return entry;
},
[storage],
[storage, blobStorage],
);
const updateEntry = useCallback(
@@ -97,50 +105,48 @@ export function useRegistry() {
createdAt: existing.createdAt,
updatedAt: new Date().toISOString(),
};
await saveEntry(storage, updated);
await saveEntry(storage, blobStorage, updated);
await refresh();
},
[storage, refresh, entries],
[storage, blobStorage, refresh, entries],
);
const removeEntry = useCallback(
async (id: string) => {
await deleteEntry(storage, id);
await deleteEntry(storage, blobStorage, id);
await refresh();
},
[storage, refresh],
[storage, blobStorage, refresh],
);
/** Close an entry and optionally its linked entries.
* Batches all saves, then does a single refresh at the end.
*/
const closeEntry = useCallback(
async (id: string, closeLinked: boolean) => {
const entry = entries.find((e) => e.id === id);
if (!entry) return;
// Save main entry as closed
const now = new Date().toISOString();
const closedMain: RegistryEntry = {
...entry,
status: "inchis",
updatedAt: now,
};
await saveEntry(storage, closedMain);
// Close linked entries in parallel
await saveEntry(storage, blobStorage, closedMain);
const linked = entry.linkedEntryIds ?? [];
if (closeLinked && linked.length > 0) {
const saves = linked
.map((linkedId) => entries.find((e) => e.id === linkedId))
.filter((e): e is RegistryEntry => !!e && e.status !== "inchis")
.map((e) =>
saveEntry(storage, { ...e, status: "inchis", updatedAt: now }),
saveEntry(storage, blobStorage, {
...e,
status: "inchis",
updatedAt: now,
}),
);
await Promise.all(saves);
}
// Single refresh at the end
await refresh();
},
[entries, storage, refresh],
[entries, storage, blobStorage, refresh],
);
const updateFilter = useCallback(
@@ -169,11 +175,11 @@ export function useRegistry() {
trackedDeadlines: [...existing, tracked],
updatedAt: new Date().toISOString(),
};
await saveEntry(storage, updated);
await saveEntry(storage, blobStorage, updated);
await refresh();
return tracked;
},
[entries, storage, refresh],
[entries, storage, blobStorage, refresh],
);
const resolveDeadline = useCallback(
@@ -198,9 +204,8 @@ export function useRegistry() {
trackedDeadlines: updatedDeadlines,
updatedAt: new Date().toISOString(),
};
await saveEntry(storage, updated);
await saveEntry(storage, blobStorage, updated);
// If the resolved deadline has a chain, automatically check for the next type
const def = getDeadlineType(dl.typeId);
await refresh();
@@ -213,7 +218,7 @@ export function useRegistry() {
return resolved;
},
[entries, storage, refresh],
[entries, storage, blobStorage, refresh],
);
const removeDeadline = useCallback(
@@ -226,10 +231,10 @@ export function useRegistry() {
trackedDeadlines: deadlines.filter((d) => d.id !== deadlineId),
updatedAt: new Date().toISOString(),
};
await saveEntry(storage, updated);
await saveEntry(storage, blobStorage, updated);
await refresh();
},
[entries, storage, refresh],
[entries, storage, blobStorage, refresh],
);
const filteredEntries = entries.filter((entry) => {
@@ -256,15 +261,11 @@ export function useRegistry() {
return true;
});
/**
* Load a single entry WITH full attachment data (for editing).
* The list uses lightweight mode that strips base64 data.
*/
const loadFullEntry = useCallback(
async (id: string): Promise<RegistryEntry | null> => {
return getFullEntry(storage, id);
return getFullEntry(storage, blobStorage, id);
},
[storage],
[storage, blobStorage],
);
return {
@@ -1,5 +1,5 @@
import type { CompanyId } from "@/core/auth/types";
import type { RegistryEntry } from "../types";
import type { RegistryEntry, RegistryAttachment } from "../types";
const STORAGE_PREFIX = "entry:";
@@ -13,15 +13,108 @@ export interface RegistryStorage {
}): Promise<Record<string, unknown>>;
}
// ── Blob separation ──
// Base64 attachment data is stored in a SEPARATE namespace ("registratura-blobs")
// so the main entries are always small (~1-2KB each). PostgreSQL never needs to
// decompress multi-MB TOAST chunks for list queries.
/**
 * Blob record stored in the "registratura-blobs" namespace, keyed by entry id.
 * Holds only the heavy base64 payloads extracted from one RegistryEntry;
 * all attachment metadata stays on the main entry.
 */
interface EntryBlobs {
  /** attachmentId → base64 data (only payloads larger than 1KB are extracted) */
  attachments?: Record<string, string>;
  /** base64 data of the closure attachment, when present and heavy */
  closureAttachment?: string;
}
/**
 * Split an entry into a lightweight record and its heavy base64 payloads.
 * Attachment metadata (id, name, type, size, addedAt) stays on the entry;
 * only the base64 `data` fields are moved into the returned blob record.
 * Returns `blobs: null` when the entry carries no heavy data.
 */
function extractBlobs(entry: RegistryEntry): {
  stripped: RegistryEntry;
  blobs: EntryBlobs | null;
} {
  const extracted: EntryBlobs = {};
  let found = false;

  // Payloads above 1KB are considered "heavy" and moved to the blob record;
  // tiny payloads stay inline. The "__stripped__" sentinel marks data that
  // was already removed server-side and must never be stored as a blob.
  const isHeavy = (data: string | undefined): data is string =>
    !!data && data.length > 1024 && data !== "__stripped__";

  const lightAttachments: RegistryAttachment[] = [];
  for (const att of entry.attachments ?? []) {
    if (isHeavy(att.data)) {
      if (!extracted.attachments) extracted.attachments = {};
      extracted.attachments[att.id] = att.data;
      found = true;
      lightAttachments.push({ ...att, data: "" });
    } else {
      // Light attachments are passed through untouched.
      lightAttachments.push(att);
    }
  }

  let stripped: RegistryEntry = { ...entry, attachments: lightAttachments };

  const closure = entry.closureInfo;
  if (closure?.attachment && isHeavy(closure.attachment.data)) {
    extracted.closureAttachment = closure.attachment.data;
    found = true;
    stripped = {
      ...stripped,
      closureInfo: {
        ...closure,
        attachment: { ...closure.attachment, data: "" },
      },
    };
  }

  return { stripped, blobs: found ? extracted : null };
}
/**
 * Re-attach extracted base64 payloads onto a lightweight entry.
 * Only placeholder slots (empty string, undefined, or the "__stripped__"
 * sentinel) are filled — real inline data always wins over the blob record.
 * With no blob record, the entry is returned unchanged (same reference).
 */
function mergeBlobs(
  entry: RegistryEntry,
  blobs: EntryBlobs | null,
): RegistryEntry {
  if (!blobs) return entry;

  let result = entry;

  const isPlaceholder = (data: string | undefined) =>
    !data || data === "" || data === "__stripped__";

  const attachmentData = blobs.attachments;
  if (attachmentData) {
    const restored = (result.attachments ?? []).map((att) => {
      const payload = attachmentData[att.id];
      return payload && isPlaceholder(att.data)
        ? { ...att, data: payload }
        : att;
    });
    result = { ...result, attachments: restored };
  }

  const closure = result.closureInfo;
  if (blobs.closureAttachment && closure?.attachment) {
    result = {
      ...result,
      closureInfo: {
        ...closure,
        attachment: { ...closure.attachment, data: blobs.closureAttachment },
      },
    };
  }

  return result;
}
/**
* Load all registry entries in a SINGLE lightweight request.
* Uses exportAll({ lightweight: true }) which strips base64 attachment data
* server-side, reducing payload from potentially 30-60MB to <100KB.
* Load all registry entries. Entries are inherently lightweight because
* base64 blobs are stored in a separate namespace. No SQL stripping needed.
*/
export async function getAllEntries(
storage: RegistryStorage,
): Promise<RegistryEntry[]> {
const all = await storage.exportAll({ lightweight: true });
const all = await storage.exportAll();
const entries: RegistryEntry[] = [];
for (const [key, value] of Object.entries(all)) {
if (key.startsWith(STORAGE_PREFIX) && value) {
@@ -33,27 +126,98 @@ export async function getAllEntries(
}
/**
 * Load a single full entry WITH attachment data (for editing).
 *
 * Reads the lightweight entry from the main namespace, then fetches the
 * blob record stored under the entry id and merges the base64 payloads
 * back in. Returns null when the entry does not exist.
 */
export async function getFullEntry(
  storage: RegistryStorage,
  blobStorage: RegistryStorage,
  id: string,
): Promise<RegistryEntry | null> {
  const entry = await storage.get<RegistryEntry>(`${STORAGE_PREFIX}${id}`);
  if (!entry) return null;
  const blobs = await blobStorage.get<EntryBlobs>(id);
  return mergeBlobs(entry, blobs);
}
/**
 * Save an entry. Strips base64 payloads from the main record and stores
 * them in the separate blob namespace so list queries stay fast.
 *
 * When extractBlobs finds no heavy data (e.g. re-saving an already-stripped
 * entry from list state on a status-only update), the existing blob record
 * is intentionally left untouched — deleting it here would destroy the
 * attachment data that the stripped entry still references.
 */
export async function saveEntry(
  storage: RegistryStorage,
  blobStorage: RegistryStorage,
  entry: RegistryEntry,
): Promise<void> {
  const { stripped, blobs } = extractBlobs(entry);
  await storage.set(`${STORAGE_PREFIX}${entry.id}`, stripped);
  if (blobs) {
    await blobStorage.set(entry.id, blobs);
  }
}
/**
 * Delete an entry together with its blob record.
 */
export async function deleteEntry(
  storage: RegistryStorage,
  blobStorage: RegistryStorage,
  id: string,
): Promise<void> {
  const entryKey = `${STORAGE_PREFIX}${id}`;
  await storage.delete(entryKey);
  // Best-effort blob cleanup — entries without attachments have no blob
  // record, so a failed delete here is not an error.
  await blobStorage.delete(id).catch(() => {});
}
/**
 * One-time migration: move base64 data from entries to the blob namespace.
 *
 * Safe to call on every mount: a marker key short-circuits subsequent runs,
 * and per-entry checks make the loop idempotent if a previous run was
 * interrupted.
 *
 * @returns the number of entries whose blobs were migrated.
 */
export async function migrateEntryBlobs(
  storage: RegistryStorage,
  blobStorage: RegistryStorage,
): Promise<number> {
  // Cheap marker check FIRST — avoids the exportAll round-trip entirely
  // once migration has completed (the original fetched everything before
  // checking the marker, paying the scan on every mount).
  const migrated = await storage.get<boolean>("__blobs_migrated__");
  if (migrated) return 0;

  // lightweight=true lets the server strip base64 data in SQL, so scanning
  // old (unmigrated, potentially multi-MB) entries stays cheap.
  const all = await storage.exportAll({ lightweight: true });

  let count = 0;
  for (const [key, value] of Object.entries(all)) {
    if (!key.startsWith(STORAGE_PREFIX) || !value) continue;
    const entry = value as RegistryEntry;

    // Only entries that reference attachments can have heavy data.
    const hasAttachments =
      (entry.attachments ?? []).length > 0 || entry.closureInfo?.attachment;
    if (!hasAttachments) continue;

    // Skip entries whose blobs were already moved (resumed migration).
    const existingBlobs = await blobStorage.get<EntryBlobs>(entry.id);
    if (existingBlobs) continue;

    // Re-fetch this single entry WITH its base64 payloads.
    const full = await storage.get<RegistryEntry>(
      `${STORAGE_PREFIX}${entry.id}`,
    );
    if (!full) continue;

    const { stripped, blobs } = extractBlobs(full);
    if (blobs) {
      await storage.set(`${STORAGE_PREFIX}${entry.id}`, stripped);
      await blobStorage.set(entry.id, blobs);
      count++;
    }
  }

  // Mark migration done so future mounts take the fast path above.
  await storage.set("__blobs_migrated__", true);
  return count;
}
const COMPANY_PREFIXES: Record<CompanyId, string> = {