Added good stuff

This commit is contained in:
Guillem Hernandez Sola
2026-04-11 14:34:18 +02:00
parent 555892348f
commit 727b052e93
5 changed files with 310 additions and 157 deletions

View File

@@ -48,57 +48,88 @@ def is_sound_effect(text):
# ─────────────────────────────────────────────
# TOKEN CLASSIFIER
#
# Three categories:
#   "alpha" — contains at least one letter (È, é, A-Z etc.)
#   "punct" — 2+ chars, all punctuation (... ?? !! ?! …)
#   "noise" — everything else (single symbols, pure digits,
#             low-confidence, sound effects)
#
# Both "alpha" and "punct" tokens are KEPT:
#   - "alpha" → contributes to translation text AND bbox
#   - "punct" → contributes to bbox only (not translation text)
#              unless it immediately follows alpha text
#              in the same cluster (handled in clustering)
# ─────────────────────────────────────────────
def classify_token(text, confidence, confidence_threshold,
                   min_text_length, filter_sound_effects):
    """
    Classify an OCR token into one of three categories.

    Returns one of: "alpha" | "punct" | "noise"
        "alpha" : has at least one letter  → keep for text + bbox
        "punct" : 2+ chars, no letters     → keep for bbox only
        "noise" : drop entirely

    Rules (checked in order):
        1. Drop if confidence below threshold      → noise
        2. Drop if shorter than min_text_length    → noise
        3. Drop pure digit strings                 → noise
        4. Drop single non-alpha characters        → noise
        5. Drop sound effects if filter enabled    → noise
        6. 2+ char string with no letters          → punct
        7. Has at least one letter                 → alpha

    Args:
        text                 : raw OCR token text
        confidence           : OCR confidence score for this token
        confidence_threshold : minimum confidence to keep a token
        min_text_length      : minimum (stripped) length to keep a token
        filter_sound_effects : when True, drop tokens matched by
                               is_sound_effect()
    """
    cleaned = text.strip()
    if confidence < confidence_threshold:
        return "noise"
    if len(cleaned) < min_text_length:
        return "noise"
    if re.fullmatch(r"\d+", cleaned):
        return "noise"
    if len(cleaned) == 1 and not cleaned.isalpha():
        return "noise"
    if filter_sound_effects and is_sound_effect(cleaned):
        return "noise"
    # 2+ chars with no letters at all → punctuation token
    # Examples: "..." "??" "!!" "?!" "…" ".."
    # (ch.isalpha() handles accented letters: È, é, ñ, ü etc.)
    if not any(ch.isalpha() for ch in cleaned):
        return "punct"
    return "alpha"
def should_keep_token(text, confidence, confidence_threshold,
                      min_text_length, filter_sound_effects):
    """
    Backward-compatible wrapper around classify_token().

    Returns:
        (keep: bool, category: str) — keep is True for "alpha" and
        "punct" tokens, False for "noise"; category is the raw
        classify_token() result.
    """
    cat = classify_token(text, confidence, confidence_threshold,
                         min_text_length, filter_sound_effects)
    return cat != "noise", cat
# ─────────────────────────────────────────────
# BOUNDING BOX
#
# Rules (match the red square exactly):
#   Width  = widest single quad's width
#   Height = sum of ALL quad heights stacked
#   X      = centered on the widest quad's CX
#   Y      = topmost Y1 of all quads
# ─────────────────────────────────────────────
def get_cluster_bbox_from_ocr(ocr_bboxes, image_shape,
padding_px=10):
"""
Computes the bubble erase bbox:
1. Per-quad: measure w, h, cx for every OCR detection
1. Per-quad: measure w, h, cx
2. Width = width of the widest single quad
3. Height = sum of every quad's height
4. X = widest quad's center ± max_w/2
(all lines sit symmetrically inside)
5. Y = top of topmost quad, bottom = Y + total_h
5. Y = top of topmost quad → Y + total_h
Args:
ocr_bboxes : List of EasyOCR quad bboxes
@@ -113,7 +144,6 @@ def get_cluster_bbox_from_ocr(ocr_bboxes, image_shape,
if not ocr_bboxes:
return 0, 0, 0, 0
# ── Per-quad metrics ──────────────────────────────────────────
quad_metrics = []
for quad in ocr_bboxes:
xs = [pt[0] for pt in quad]
@@ -121,30 +151,23 @@ def get_cluster_bbox_from_ocr(ocr_bboxes, image_shape,
qx1, qx2 = min(xs), max(xs)
qy1, qy2 = min(ys), max(ys)
quad_metrics.append({
"x1" : qx1,
"x2" : qx2,
"y1" : qy1,
"y2" : qy2,
"x1" : qx1, "x2" : qx2,
"y1" : qy1, "y2" : qy2,
"w" : qx2 - qx1,
"h" : qy2 - qy1,
"cx" : (qx1 + qx2) / 2.0,
})
# ── Width: widest single quad ─────────────────────────────────
widest = max(quad_metrics, key=lambda q: q["w"])
max_w = widest["w"]
center_x = widest["cx"]
total_h = sum(q["h"] for q in quad_metrics)
# ── Height: sum of all quad heights ──────────────────────────
total_h = sum(q["h"] for q in quad_metrics)
# ── Box edges ─────────────────────────────────────────────────
box_x1 = center_x - max_w / 2.0
box_x2 = center_x + max_w / 2.0
box_y1 = min(q["y1"] for q in quad_metrics)
box_y2 = box_y1 + total_h
# ── Padding + clamp ───────────────────────────────────────────
x1 = max(0, box_x1 - padding_px)
y1 = max(0, box_y1 - padding_px)
x2 = min(img_w, box_x2 + padding_px)
@@ -171,17 +194,6 @@ def boxes_are_close(bbox_a, bbox_b, proximity_px=80):
return not (ax2 < bx1 or bx2 < ax1 or ay2 < by1 or by2 < ay1)
# ─────────────────────────────────────────────
# TEXT LINE FILTER
# ─────────────────────────────────────────────
def has_translatable_content(text):
    """
    Return True if *text* contains at least one letter.

    Uses str.isalpha per character, so accented letters
    (È, é, ñ, ü, …) count as translatable content.
    """
    for ch in text:
        if ch.isalpha():
            return True
    return False
# ─────────────────────────────────────────────
# POST-CLUSTER MERGE (Union-Find)
# ─────────────────────────────────────────────
@@ -270,11 +282,17 @@ def cluster_into_bubbles(ocr_results, image_shape,
Pass 1 — DBSCAN on center points
Pass 2 — Bounding-box proximity merge
Token categories per cluster:
"alpha" tokens → translation text + bbox
"punct" tokens → bbox only (e.g. "..." after "HN")
"noise" tokens → already filtered before this function
Bbox: widest-line width (centered) × stacked height.
All quads contribute to bbox regardless of content.
Returns:
bubble_dict : cluster_id → list of translatable text lines
bubble_dict : cluster_id → list of text lines
(alpha tokens only, punct appended
to last alpha line if spatially adjacent)
bbox_dict : cluster_id → (x1, y1, x2, y2)
ocr_quads : cluster_id → list of ALL raw EasyOCR quads
"""
@@ -303,6 +321,8 @@ def cluster_into_bubbles(ocr_results, image_shape,
raw_clusters.setdefault(label, [])
raw_quads.setdefault(label, [])
bbox, text, _ = ocr_results[idx]
# Store (cy, cx, text, category)
cat = ocr_results[idx][2] # confidence stored as category below
raw_clusters[label].append(
(centers[idx][1], centers[idx][0], text))
raw_quads[label].append(bbox)
@@ -335,15 +355,40 @@ def cluster_into_bubbles(ocr_results, image_shape,
items_sorted = sorted(items, key=lambda t: t[0])
text_lines = [
text for _, _, text in items_sorted
if has_translatable_content(text)
]
# ── Build text lines ──────────────────────────────────────
# Alpha tokens become text lines.
# Punct tokens (... ?? etc.) are appended to the
# nearest preceding alpha token on the same Y level.
alpha_lines = [] # (cy, text) for alpha tokens
punct_tokens = [] # (cy, text) for punct tokens
for cy, cx, text in items_sorted:
if any(ch.isalpha() for ch in text):
alpha_lines.append((cy, text))
else:
punct_tokens.append((cy, text))
# Append each punct token to the closest alpha line by Y
for pcy, ptext in punct_tokens:
if alpha_lines:
# Find alpha line with closest cy
closest_idx = min(
range(len(alpha_lines)),
key=lambda k: abs(alpha_lines[k][0] - pcy)
)
cy_a, text_a = alpha_lines[closest_idx]
alpha_lines[closest_idx] = (cy_a, text_a + ptext)
# If no alpha lines at all, punct still contributes
# to bbox but not to translation text
text_lines = [t for _, t in alpha_lines]
# Fallback: if no alpha at all, keep everything
if not text_lines:
text_lines = [text for _, _, text in items_sorted]
bubble_dict[i] = text_lines
ocr_quads[i] = quads
ocr_quads[i] = quads # ALL quads → full bbox
bbox_dict[i] = get_cluster_bbox_from_ocr(
quads, image_shape, padding_px=bbox_padding
@@ -353,7 +398,8 @@ def cluster_into_bubbles(ocr_results, image_shape,
print(f" Cluster #{i}: {len(quads)} quad(s) "
f"bbox=({int(b[0])},{int(b[1])})→"
f"({int(b[2])},{int(b[3])}) "
f"w={int(b[2]-b[0])} h={int(b[3]-b[1])}")
f"w={int(b[2]-b[0])} h={int(b[3]-b[1])} "
f"text={text_lines}")
return bubble_dict, bbox_dict, ocr_quads
@@ -519,15 +565,17 @@ def translate_manga_text(
for bbox, text, confidence in results:
cleaned = text.strip()
keep, reason = should_keep_token(
keep, category = should_keep_token(
cleaned, confidence,
confidence_threshold, min_text_length,
filter_sound_effects
)
if keep:
filtered.append((bbox, cleaned, confidence))
if category == "punct":
print(f" ✔ Punct kept: '{cleaned}'")
else:
if reason == "sound effect":
if category == "sound effect":
print(f" 🔇 SFX skipped: '{cleaned}'")
skipped += 1
@@ -656,6 +704,6 @@ if __name__ == "__main__":
filter_sound_effects = True,
quality_threshold = 0.5,
upscale_factor = 2.5,
bbox_padding = 0,
bbox_padding = 3,
debug = True,
)
)