# pdf.py
  1. # -*- coding: utf-8 -*-
  2. from .__load__ import *
  3. import fitz
  4. import re
  5. class Pdf(Base):
  6. def json(self):
  7. if not self.file:
  8. return False
  9. self.getPath()
  10. doc = fitz.open(self.file)
  11. page_count = doc.page_count
  12. result = {'total': page_count, 'pages': []}
  13. # 黑名单符号,出现即整句丢弃
  14. blacklist_chars = "•★◆●◇▪…※§‡†¤₪"
  15. blacklist_pattern = f"[{re.escape(blacklist_chars)}]"
  16. def is_page_number(text):
  17. text = text.strip().lower()
  18. return (
  19. re.fullmatch(r"(page)?\s*\d+\s*(of\s*\d+)?", text)
  20. or re.fullmatch(r"\d+", text)
  21. or re.fullmatch(r"\d+\s*/\s*\d+", text)
  22. )
  23. def is_valid_english(text):
  24. """
  25. 保留纯英文短语、单词、句子,包括标点符号(如引号、感叹号、句号等)
  26. """
  27. # 过滤掉包含中文或其他语言的行
  28. if re.search(r'[\u4e00-\u9fff]', text):
  29. return False
  30. # 过滤掉非 ascii 且非常规英文标点的字符
  31. if re.search(rf"{blacklist_pattern}", text):
  32. return False
  33. # 至少要包含一个字母或句号等基本英文结构
  34. if not re.search(r"[A-Za-z]", text):
  35. return False
  36. return True
  37. for page_num in range(len(doc)):
  38. page_obj = doc.load_page(page_num)
  39. page_height = page_obj.rect.height
  40. text = page_obj.get_text().strip()
  41. blocks = page_obj.get_text("dict", sort=True)["blocks"]
  42. has_visible_content = any(
  43. b for b in blocks if b['type'] in (0, 1)
  44. )
  45. if not text and not has_visible_content:
  46. continue
  47. # 封面图
  48. try:
  49. pix = page_obj.get_pixmap(matrix=fitz.Matrix(0.3, 0.3))
  50. cover_file = f"{self.param['path']}cover_page_{page_num+1}.png"
  51. pix.save(cover_file)
  52. if 'host' in self.param and self.param['host']:
  53. cover_file = cover_file.replace(Demeter.path + 'runtime/', self.param['host'])
  54. except Exception as e:
  55. print(f"封面图失败: {e}")
  56. cover_file = ""
  57. page_items = []
  58. for i, b in enumerate(blocks):
  59. y_top = b["bbox"][1]
  60. y_bottom = b["bbox"][3]
  61. if y_top < page_height * 0.02 or y_bottom > page_height * 0.98:
  62. continue
  63. if b['type'] == 0:
  64. text_content = ""
  65. for line in b["lines"]:
  66. line_text = ""
  67. for span in line["spans"]:
  68. span_text = span["text"].strip()
  69. if not span_text or is_page_number(span_text):
  70. continue
  71. line_text += span_text + " "
  72. line_text = line_text.strip()
  73. if not line_text:
  74. continue
  75. # 全行过滤:含黑名单符号或不符合英文语义
  76. if not is_valid_english(line_text):
  77. continue
  78. text_content += line_text + "\n"
  79. text_content = text_content.strip()
  80. text_content = self.clean_text(self.removeDomains(text_content))
  81. if text_content:
  82. page_items.append({
  83. "type": "text",
  84. "content": text_content,
  85. "pos": b["bbox"]
  86. })
  87. elif b['type'] == 1:
  88. image_bytes = b.get("image", b"")
  89. if not image_bytes or len(image_bytes) < 100:
  90. continue
  91. image_ext = "png"
  92. image_file = f"{self.param['path']}page{page_num+1}_img_{i}.{image_ext}"
  93. with open(image_file, "wb") as f:
  94. f.write(image_bytes)
  95. if 'host' in self.param and self.param['host']:
  96. image_file = image_file.replace(Demeter.path + 'runtime/', self.param['host'])
  97. page_items.append({
  98. "type": "image",
  99. "ext": image_ext,
  100. "content": image_file,
  101. "pos": b["bbox"]
  102. })
  103. result['pages'].append({
  104. "cover": cover_file,
  105. "content": page_items
  106. })
  107. return result
  108. # 提取为langchain的Document格式
  109. def doc(self):
  110. if not self.file:
  111. return False
  112. #loader = PyPDFLoader(self.file, extract_images=False)
  113. #return loader.load()
  114. doc = fitz.open(self.file)
  115. result = {'page': page, 'content': []}
  116. for page_num in range(len(doc)):
  117. page = doc.load_page(page_num)
  118. # 提取文本
  119. text = page.get_text()
  120. # 提取图片中的文字
  121. image_texts = []
  122. for img in page.get_images(full=True):
  123. xref = img[0]
  124. base_image = doc.extract_image(xref)
  125. image_bytes = base_image["image"]
  126. image = Image.open(io.BytesIO(image_bytes))
  127. #result = Demeter.service('loader', 'extract').get(image)
  128. ocr_result = ocr.ocr(image)
  129. for line in ocr_result[0]:
  130. image_texts.append(line[1])
  131. '''
  132. # OCR 识别
  133. ocr_result = ocr_reader.readtext(image)
  134. image_texts = " ".join([line[1] for line in ocr_result]).strip()
  135. '''
  136. # 合并文字 + 图片文字
  137. full_text = text.strip() + "\n" + "\n".join(image_texts)
  138. document = langchain.schema.Document(page_content=full_text)
  139. result['content'].append(document)
  140. return result
  141. def clean_text(self, s):
  142. return re.sub(r'[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]', '', s)