2018-03-26 01:49:25 -07:00
|
|
|
# © 2018 James R. Barlow: github.com/jbarlow83
|
|
|
|
#
|
|
|
|
# This file is part of OCRmyPDF.
|
|
|
|
#
|
|
|
|
# OCRmyPDF is free software: you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
|
|
# (at your option) any later version.
|
|
|
|
#
|
|
|
|
# OCRmyPDF is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with OCRmyPDF. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
|
|
|
|
2018-12-30 01:28:15 -08:00
|
|
|
import datetime
|
2018-04-17 16:59:21 -07:00
|
|
|
from datetime import timezone
|
2019-01-04 13:20:41 -08:00
|
|
|
import logging
|
|
|
|
import mmap
|
2018-12-30 01:28:15 -08:00
|
|
|
from os import fspath
|
2018-07-07 01:35:05 -07:00
|
|
|
from pathlib import Path
|
|
|
|
from shutil import copyfile
|
2018-12-30 01:28:15 -08:00
|
|
|
from unittest.mock import MagicMock, patch
|
2018-03-26 01:49:25 -07:00
|
|
|
|
2018-12-30 01:28:15 -08:00
|
|
|
import pytest
|
2018-09-10 16:06:01 -07:00
|
|
|
|
2018-12-30 01:28:15 -08:00
|
|
|
import pikepdf
|
2019-04-04 21:02:38 +02:00
|
|
|
from ocrmypdf._jobcontext import PDFContext
|
2018-03-26 01:49:25 -07:00
|
|
|
from ocrmypdf.exceptions import ExitCode
|
2018-12-30 01:28:15 -08:00
|
|
|
from ocrmypdf.pdfa import SRGB_ICC_PROFILE, file_claims_pdfa, generate_pdfa_ps
|
|
|
|
from pikepdf.models.metadata import decode_pdf_date
|
2018-06-13 01:02:53 -07:00
|
|
|
|
|
|
|
# PyMuPDF (fitz) is an optional test dependency; tests that need it are
# skipped via @pytest.mark.skipif(not fitz, ...) when it is not installed.
try:
    import fitz
except ImportError:
    fitz = None
|
2018-03-26 01:49:25 -07:00
|
|
|
|
|
|
|
# pytest.helpers is dynamic
|
|
|
|
# pylint: disable=no-member
|
|
|
|
# pylint: disable=w0612
|
|
|
|
|
2018-12-12 22:01:21 -08:00
|
|
|
pytestmark = pytest.mark.filterwarnings('ignore:.*XMLParser.*:DeprecationWarning')
|
|
|
|
|
2018-03-26 01:49:25 -07:00
|
|
|
# Shortcuts to helpers registered in conftest.py via the pytest-helpers plugin.
check_ocrmypdf = pytest.helpers.check_ocrmypdf
run_ocrmypdf = pytest.helpers.run_ocrmypdf
spoof = pytest.helpers.spoof
|
|
|
|
|
|
|
|
|
2018-12-30 01:27:49 -08:00
|
|
|
@pytest.mark.parametrize("output_type", ['pdfa', 'pdf'])
def test_preserve_metadata(spoof_tesseract_noop, output_type, resources, outpdf):
    """The input file's Title/Author docinfo must survive OCR unchanged."""
    original = pikepdf.open(resources / 'graph.pdf')

    output = check_ocrmypdf(
        resources / 'graph.pdf',
        outpdf,
        '--output-type',
        output_type,
        env=spoof_tesseract_noop,
    )

    converted = pikepdf.open(output)
    for key in '/Title', '/Author':
        assert original.docinfo[key] == converted.docinfo[key]

    # The output must also claim the requested conformance level
    pdfa_info = file_claims_pdfa(str(output))
    assert pdfa_info['output'] == output_type
|
|
|
|
|
|
|
|
|
2018-12-30 01:27:49 -08:00
|
|
|
@pytest.mark.parametrize("output_type", ['pdfa', 'pdf'])
def test_override_metadata(spoof_tesseract_noop, output_type, resources, outpdf):
    """Command-line metadata overrides replace docinfo, including non-ASCII.

    Also confirms Keywords stays empty and CreationDate is carried over.
    """
    input_file = resources / 'c02-22.pdf'
    german = 'Du siehst den Wald vor lauter Bäumen nicht.'
    chinese = '孔子'

    proc, _out, err = run_ocrmypdf(
        input_file,
        outpdf,
        '--title',
        german,
        '--author',
        chinese,
        '--output-type',
        output_type,
        env=spoof_tesseract_noop,
    )
    assert proc.returncode == ExitCode.ok, err

    source = pikepdf.open(input_file)
    result = pikepdf.open(outpdf)

    # Overridden fields take the new values; untouched fields stay empty
    assert result.docinfo.Title == german, result.docinfo
    assert result.docinfo.Author == chinese, result.docinfo
    assert result.docinfo.get('/Keywords', '') == ''

    # Creation date must be carried over verbatim
    created_before = decode_pdf_date(str(source.docinfo.CreationDate))
    created_after = decode_pdf_date(str(result.docinfo.CreationDate))
    assert created_before == created_after

    pdfa_info = file_claims_pdfa(outpdf)
    assert pdfa_info['output'] == output_type
|
|
|
|
|
|
|
|
|
|
|
|
def test_high_unicode(spoof_tesseract_noop, resources, no_outpdf):
    """Astral-plane Unicode in metadata must be rejected as bad arguments.

    Ghostscript doesn't support high Unicode, so neither do we, to be safe.
    """
    subject = 'U+1030C is: 𐌌'

    proc, _out, err = run_ocrmypdf(
        resources / 'c02-22.pdf',
        no_outpdf,
        '--subject',
        subject,
        '--output-type',
        'pdfa',
        env=spoof_tesseract_noop,
    )
    assert proc.returncode == ExitCode.bad_args, err
|
|
|
|
|
|
|
|
|
2018-05-17 00:14:57 -07:00
|
|
|
@pytest.mark.skipif(not fitz, reason="test uses fitz")
@pytest.mark.parametrize('ocr_option', ['--skip-text', '--force-ocr'])
@pytest.mark.parametrize('output_type', ['pdf', 'pdfa'])
def test_bookmarks_preserved(
    spoof_tesseract_noop, output_type, ocr_option, resources, outpdf
):
    """The PDF outline (bookmarks/table of contents) must survive OCR."""
    input_file = resources / 'toc.pdf'
    toc_before = fitz.Document(str(input_file)).getToC()

    check_ocrmypdf(
        input_file,
        outpdf,
        ocr_option,
        '--output-type',
        output_type,
        env=spoof_tesseract_noop,
    )

    toc_after = fitz.Document(str(outpdf)).getToC()

    # Print both tables of contents so a mismatch is easy to diagnose
    print(toc_before)
    print(toc_after)
    assert toc_before == toc_after
|
2018-04-02 17:53:39 -07:00
|
|
|
|
|
|
|
|
|
|
|
def seconds_between_dates(date1, date2):
    """Return the signed number of seconds elapsed from *date1* to *date2*."""
    delta = date2 - date1
    return delta.total_seconds()
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.parametrize('infile', ['trivial.pdf', 'jbig2.pdf'])
@pytest.mark.parametrize('output_type', ['pdf', 'pdfa'])
def test_creation_date_preserved(
    spoof_tesseract_noop, output_type, resources, infile, outpdf
):
    """CreationDate must be carried over while ModDate is refreshed."""
    input_file = resources / infile

    check_ocrmypdf(
        input_file, outpdf, '--output-type', output_type, env=spoof_tesseract_noop
    )

    source_pdf = pikepdf.open(input_file)
    result_pdf = pikepdf.open(outpdf)

    source_info = source_pdf.trailer.get('/Info', {})
    result_info = result_pdf.trailer.get('/Info', {})

    if not source_info:
        # Input had no docinfo at all: the output must gain a CreationDate
        assert result_info.get('/CreationDate', '') != ''
    else:
        # We expect that the creation date stayed the same
        created_before = decode_pdf_date(str(source_info['/CreationDate']))
        created_after = decode_pdf_date(str(result_info['/CreationDate']))
        assert seconds_between_dates(created_before, created_after) < 1000

    # We expect that the modified date is quite recent
    modified = decode_pdf_date(str(result_info['/ModDate']))
    now = datetime.datetime.now(timezone.utc)
    assert seconds_between_dates(modified, now) < 1000
|
2018-04-02 17:53:39 -07:00
|
|
|
|
2018-05-10 20:37:10 -07:00
|
|
|
|
2018-05-10 16:43:28 -07:00
|
|
|
@pytest.mark.parametrize('output_type', ['pdf', 'pdfa'])
def test_xml_metadata_preserved(spoof_tesseract_noop, output_type, resources, outpdf):
    """XMP metadata properties we promise to keep must survive OCR.

    Reads the XMP packet of the input and output files with python-xmp-toolkit
    (libxmp) and compares the properties that should be copied verbatim.
    Skips if libxmp/libexempi is not installed.
    """
    input_file = resources / 'graph.pdf'

    try:
        from libxmp import consts
        from libxmp.utils import file_to_dict
    except Exception:
        pytest.skip("libxmp not available or libexempi3 not installed")

    before = file_to_dict(str(input_file))

    check_ocrmypdf(
        input_file, outpdf, '--output-type', output_type, env=spoof_tesseract_noop
    )

    after = file_to_dict(str(outpdf))

    # Properties that must be copied to the output unchanged
    equal_properties = [
        'dc:contributor',
        'dc:coverage',
        'dc:creator',
        'dc:description',
        'dc:format',
        'dc:identifier',
        'dc:language',
        'dc:publisher',
        'dc:relation',
        'dc:rights',
        'dc:source',
        'dc:subject',
        'dc:title',
        'dc:type',
        'pdf:keywords',
    ]
    # Properties that OCRmyPDF legitimately rewrites; listed for documentation
    # and deliberately not asserted below.
    might_change_properties = [
        'dc:date',
        'pdf:pdfversion',
        'pdf:Producer',
        'xmp:CreateDate',
        'xmp:ModifyDate',
        'xmp:MetadataDate',
        'xmp:CreatorTool',
        'xmpMM:DocumentId',
        'xmpMM:InstanceId',  # fixed: was misspelled 'xmpMM:DnstanceId'
    ]

    # Cleanup messy data structure
    # Top level is key-value mapping of namespaces to keys under namespace,
    # so we put everything in the same namespace
    def unify_namespaces(xmpdict):
        for entries in xmpdict.values():
            yield from entries

    # Now we have a list of (key, value, {infodict}). We don't care about
    # infodict. Just flatten to keys and values
    def keyval_from_tuple(list_of_tuples):
        for k, v, *_ in list_of_tuples:
            yield k, v

    before = dict(keyval_from_tuple(unify_namespaces(before)))
    after = dict(keyval_from_tuple(unify_namespaces(after)))

    for prop in equal_properties:
        if prop in before:
            assert prop in after, f'{prop} dropped from xmp'
            assert before[prop] == after[prop]

        # Certain entries like title appear as dc:title[1], with the possibility
        # of several
        propidx = f'{prop}[1]'
        if propidx in before:
            assert (
                after.get(propidx) == before[propidx]
                or after.get(prop) == before[propidx]
            )
|
2018-07-07 01:35:05 -07:00
|
|
|
|
|
|
|
|
|
|
|
def test_srgb_in_unicode_path(tmpdir):
    """Test that we can produce pdfmark when install path is not ASCII"""
    # Create a directory whose name is a non-ASCII character (U+4000)
    unicode_dir = Path(fspath(tmpdir)) / b'\xe4\x80\x80'.decode('utf-8')
    unicode_dir.mkdir()

    # Copy the sRGB ICC profile into the non-ASCII path
    icc_copy = unicode_dir / 'sRGB.icc'
    copyfile(SRGB_ICC_PROFILE, fspath(icc_copy))

    # Point the pdfa module at the relocated profile and generate PostScript
    with patch('ocrmypdf.pdfa.SRGB_ICC_PROFILE', new=str(icc_copy)):
        generate_pdfa_ps(unicode_dir / 'out.ps')
|
2018-09-11 14:44:16 -07:00
|
|
|
|
|
|
|
|
|
|
|
def test_kodak_toc(resources, outpdf, spoof_tesseract_noop):
    """A Kodak-produced PDF's outline must remain structurally valid."""
    check_ocrmypdf(
        resources / 'kcs.pdf', outpdf, '--output-type', 'pdf', env=spoof_tesseract_noop
    )

    result = pikepdf.open(outpdf)

    # If the outline has a /First entry it must reference a dictionary
    if pikepdf.Name.First in result.root.Outlines:
        assert isinstance(result.root.Outlines.First, pikepdf.Dictionary)
|
2018-12-30 00:13:25 -08:00
|
|
|
|
|
|
|
|
|
|
|
def test_metadata_fixup_warning(resources, outdir):
    """metadata_fixup should warn only when XMP metadata cannot be copied."""
    from ocrmypdf._pipeline import metadata_fixup

    input_files = [
        str(outdir / 'graph.repaired.pdf'),
        str(outdir / 'layers.rendered.pdf'),
        str(outdir / 'pdfa.pdf'),  # It is okay that this is not a PDF/A
    ]
    for filename in input_files:
        copyfile(resources / 'graph.pdf', filename)

    def run_fixup():
        # Fresh mocks each run so warning call counts don't accumulate
        log = MagicMock()
        metadata_fixup(
            input_files_groups=input_files,
            output_file=outdir / 'out.pdf',
            log=log,
            context=MagicMock(),
        )
        return log

    # All three copies carry identical metadata: no warning expected
    run_fixup().warning.assert_not_called()

    # Now add some metadata that will not be copyable
    graph = pikepdf.open(outdir / 'graph.repaired.pdf')
    with graph.open_metadata() as meta:
        meta['prism2:publicationName'] = 'OCRmyPDF Test'
    graph.save(outdir / 'graph.repaired.pdf')

    run_fixup().warning.assert_called_once()
|
2019-01-04 13:20:41 -08:00
|
|
|
|
|
|
|
|
|
|
|
def test_prevent_gs_invalid_xml(resources, outdir):
    """Ensure the PDF/A conversion step does not emit invalid XMP XML.

    Runs convert_to_pdfa and then scans the raw output bytes for an XMP
    packet containing either an XML-escaped NUL ('&#0;') or a literal NUL,
    both of which Ghostscript has been known to produce and both of which
    make the XML invalid.
    """
    from ocrmypdf.__main__ import parser
    from ocrmypdf._pipeline import convert_to_pdfa
    from ocrmypdf.pdfa import generate_pdfa_ps
    from ocrmypdf.pdfinfo import PdfInfo

    generate_pdfa_ps(outdir / 'pdfa.ps')
    copyfile(resources / 'enron1.pdf', outdir / 'layers.rendered.pdf')

    options = parser.parse_args(
        args=['-j', '1', '--output-type', 'pdfa-2', 'a.pdf', 'b.pdf']
    )
    pdfinfo = PdfInfo(resources / 'enron1.pdf')
    context = PDFContext(options, outdir, resources / 'enron1.pdf', pdfinfo)

    convert_to_pdfa(str(outdir / 'layers.rendered.pdf'), str(outdir / 'pdfa.ps'), context)

    with open(outdir / 'pdfa.pdf', 'rb') as f:
        with mmap.mmap(
            f.fileno(), 0, flags=mmap.MAP_PRIVATE, prot=mmap.PROT_READ
        ) as mm:
            # Since the XML may be invalid, we scan instead of actually feeding it
            # to a parser.
            XMP_MAGIC = b'W5M0MpCehiHzreSzNTczkc9d'
            xmp_start = mm.find(XMP_MAGIC)
            xmp_end = mm.rfind(b'<?xpacket end', xmp_start)
            assert 0 < xmp_start < xmp_end
            # Fixed mojibake: the intended pattern is the XML character
            # reference for NUL, b'&#0;', per the assertion message.
            assert mm.find(b'&#0;', xmp_start, xmp_end) == -1, "found escaped nul"
            assert mm.find(b'\x00', xmp_start, xmp_end) == -1
|