refactor(resource): improve resource management and exception safety across controllers and utilities (#5350)

TLDR:
- Use try-with-resources for InputStreams, PDDocument, TempFile, and
ByteArrayOutputStream to ensure proper cleanup
- Refactor PDF manipulation methods to handle exceptions and resource
closure more robustly
- Simplify session sorting logic in SessionPersistentRegistry
- Add missing resource cleanup in MergeController and
SplitPdfBySectionsController
- Update PrintFileController to close streams and documents safely
- Add unit test for SplitPdfBySizeController

# Description of Changes

This pull request introduces several improvements to resource management
and error handling throughout the codebase, especially for temporary
files and PDF document objects. The changes help prevent resource leaks
by ensuring files and documents are properly closed or deleted in the
event of errors or after use. Notable updates include the use of
try-with-resources, custom AutoCloseable wrappers, and enhanced error
handling logic.
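
The core pattern throughout is plain try-with-resources: any `AutoCloseable` opened in the resource header is closed automatically, even when an exception is thrown mid-block. A minimal illustrative sketch of the idea (not lifted verbatim from the diff; the class name and temp-file prefix are invented for the example):

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

import org.apache.pdfbox.Loader;
import org.apache.pdfbox.pdmodel.PDDocument;

public class TryWithResourcesSketch {
    // Loads a PDF, writes it to a temp file, and guarantees cleanup on every exit path.
    static byte[] roundTrip(byte[] pdfBytes) throws IOException {
        Path tempFile = Files.createTempFile("example", ".pdf");
        try (PDDocument document = Loader.loadPDF(pdfBytes)) {
            document.save(tempFile.toFile());
            return Files.readAllBytes(tempFile);
        } finally {
            // Delete the temp file whether the save succeeded or not.
            Files.deleteIfExists(tempFile);
        }
    }
}
```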

**Resource Management and Error Handling Improvements**

* Added try-catch blocks to delete temporary files if an exception
occurs during file conversion in `GeneralUtils.java`, ensuring no
orphaned temp files are left behind
* Introduced the `TempFile` AutoCloseable helper class in
`InvertFullColorStrategy.java`, and refactored the PDF processing logic
to use try-with-resources for both temporary files and `PDDocument`
objects, ensuring proper cleanup
* Improved error handling in `MergeController.java` by ensuring that a
partially created merged document is closed if an error occurs during
the merge process (see the close-on-failure sketch after this list)
* Enhanced the splitting logic in `SplitPdfBySectionsController.java` to
close any partially created `PDDocument` objects if an exception is
thrown, preventing resource leaks
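
For the merge path specifically, the change follows the usual close-on-failure shape: the merged document is handed to the caller only once it has been fully assembled; otherwise it is closed before the exception propagates. A simplified, self-contained sketch of that shape (the real code obtains the document from `pdfDocumentFactory`; `new PDDocument()` is used here only to keep the example standalone):

```java
import java.io.IOException;
import java.util.List;

import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.pdmodel.PDPage;

final class MergeSketch {
    // Returns the merged document only on success; on any failure it is closed first.
    static PDDocument mergeDocuments(List<PDDocument> documents) throws IOException {
        PDDocument mergedDoc = new PDDocument();
        boolean success = false;
        try {
            for (PDDocument doc : documents) {
                for (PDPage page : doc.getPages()) {
                    mergedDoc.addPage(page);
                }
            }
            success = true;
            return mergedDoc;
        } finally {
            // If success is still false, the caller never received mergedDoc, so close it here.
            if (!success) {
                mergedDoc.close();
            }
        }
    }
}
```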

**Refactoring for Safer Document Handling**

* Refactored `handleSplitBySize` in `SplitPdfBySizeController.java` to
use a custom `DocHolder` AutoCloseable wrapper that manages the
lifecycle of `PDDocument` objects, and updated all related logic to use
this wrapper, improving safety and clarity (a condensed sketch of the
wrapper follows this list)
* Updated `handleSplitByPageCount` in `SplitPdfBySizeController.java` to
ensure `PDDocument` objects are set to null after being saved and
closed, reducing the risk of operating on closed resources
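
Both helpers follow the same idea: wrap a resource that needs deterministic cleanup in a small `AutoCloseable` so it can live in a try-with-resources header. A condensed sketch of the `DocHolder` shape described above (simplified from the diff; the real implementation logs and swallows the close error in `setDoc` rather than rethrowing):

```java
import java.io.IOException;

import org.apache.pdfbox.pdmodel.PDDocument;

// Holds the "current" output part while splitting. Swapping in a new document closes
// the previous one, and try-with-resources closes whatever is still held at the end.
final class DocHolder implements AutoCloseable {
    private PDDocument doc;

    DocHolder(PDDocument doc) {
        this.doc = doc;
    }

    PDDocument getDoc() {
        return doc;
    }

    void setDoc(PDDocument newDoc) throws IOException {
        if (doc != null) {
            doc.close(); // close the finished part before starting the next one
        }
        doc = newDoc;
    }

    @Override
    public void close() throws IOException {
        if (doc != null) {
            doc.close();
        }
    }
}
```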


---

## Checklist

### General

- [ ] I have read the [Contribution
Guidelines](https://github.com/Stirling-Tools/Stirling-PDF/blob/main/CONTRIBUTING.md)
- [ ] I have read the [Stirling-PDF Developer
Guide](https://github.com/Stirling-Tools/Stirling-PDF/blob/main/devGuide/DeveloperGuide.md)
(if applicable)
- [ ] I have read the [How to add new languages to
Stirling-PDF](https://github.com/Stirling-Tools/Stirling-PDF/blob/main/devGuide/HowToAddNewLanguage.md)
(if applicable)
- [ ] I have performed a self-review of my own code
- [ ] My changes generate no new warnings

### Documentation

- [ ] I have updated relevant docs on [Stirling-PDF's doc
repo](https://github.com/Stirling-Tools/Stirling-Tools.github.io/blob/main/docs/)
(if functionality has heavily changed)
- [ ] I have read the section [Add New Translation
Tags](https://github.com/Stirling-Tools/Stirling-PDF/blob/main/devGuide/HowToAddNewLanguage.md#add-new-translation-tags)
(for new translation tags only)

### Translations (if applicable)

- [ ] I ran
[`scripts/counter_translation.py`](https://github.com/Stirling-Tools/Stirling-PDF/blob/main/docs/counter_translation.md)

### UI Changes (if applicable)

- [ ] Screenshots or videos demonstrating the UI changes are attached
(e.g., as comments or direct attachments in the PR)

### Testing (if applicable)

- [ ] I have tested my changes locally. Refer to the [Testing
Guide](https://github.com/Stirling-Tools/Stirling-PDF/blob/main/devGuide/DeveloperGuide.md#6-testing)
for more details.

---------

Signed-off-by: Balázs Szücs <bszucs1209@gmail.com>
Balázs Szücs 2025-12-31 19:09:29 +01:00 committed by GitHub
parent 3fd0398648
commit 02f9785212
19 changed files with 856 additions and 605 deletions


@ -91,6 +91,14 @@ public class GeneralUtils {
while ((bytesRead = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, bytesRead);
}
} catch (IOException e) {
if (tempFile.exists()) {
try {
Files.delete(tempFile.toPath());
} catch (IOException ignored) {
}
}
throw e;
}
return tempFile;
}
@ -499,6 +507,12 @@ public class GeneralUtils {
while ((bytesRead = in.read(buffer)) != -1) {
out.write(buffer, 0, bytesRead);
}
} catch (IOException e) {
try {
Files.deleteIfExists(tempFile);
} catch (IOException ignored) {
}
throw e;
}
return tempFile.toFile();
}


@ -63,15 +63,16 @@ public class ImageProcessingUtils {
} else {
int width = image.getWidth();
int height = image.getHeight();
int[] pixels = new int[width * height];
image.getRGB(0, 0, width, height, pixels, 0, width);
byte[] data = new byte[width * height * 3];
int index = 0;
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
int rgb = image.getRGB(x, y);
data[index++] = (byte) ((rgb >> 16) & 0xFF); // Red
data[index++] = (byte) ((rgb >> 8) & 0xFF); // Green
data[index++] = (byte) (rgb & 0xFF); // Blue
}
for (int rgb : pixels) {
data[index++] = (byte) ((rgb >> 16) & 0xFF); // Red
data[index++] = (byte) ((rgb >> 8) & 0xFF); // Green
data[index++] = (byte) (rgb & 0xFF); // Blue
}
return data;
}


@ -32,83 +32,76 @@ public class InvertFullColorStrategy extends ReplaceAndInvertColorStrategy {
@Override
public InputStreamResource replace() throws IOException {
File file = null;
try {
// Create a temporary file, with the original filename from the multipart file
file = Files.createTempFile("temp", getFileInput().getOriginalFilename()).toFile();
try (TempFile tempFile =
new TempFile(
Files.createTempFile("temp", getFileInput().getOriginalFilename())
.toFile())) {
// Transfer the content of the multipart file to the file
getFileInput().transferTo(file);
getFileInput().transferTo(tempFile.getFile());
// Load the uploaded PDF
PDDocument document = Loader.loadPDF(file);
try (PDDocument document = Loader.loadPDF(tempFile.getFile())) {
// Render each page and invert colors
PDFRenderer pdfRenderer = new PDFRenderer(document);
for (int page = 0; page < document.getNumberOfPages(); page++) {
BufferedImage image;
// Render each page and invert colors
PDFRenderer pdfRenderer = new PDFRenderer(document);
for (int page = 0; page < document.getNumberOfPages(); page++) {
BufferedImage image;
// Use global maximum DPI setting, fallback to 300 if not set
int renderDpi = 300; // Default fallback
ApplicationProperties properties =
ApplicationContextProvider.getBean(ApplicationProperties.class);
if (properties != null && properties.getSystem() != null) {
renderDpi = properties.getSystem().getMaxDPI();
}
final int dpi = renderDpi;
final int pageNum = page;
// Use global maximum DPI setting, fallback to 300 if not set
int renderDpi = 300; // Default fallback
ApplicationProperties properties =
ApplicationContextProvider.getBean(ApplicationProperties.class);
if (properties != null && properties.getSystem() != null) {
renderDpi = properties.getSystem().getMaxDPI();
}
final int dpi = renderDpi;
final int pageNum = page;
image =
ExceptionUtils.handleOomRendering(
pageNum + 1,
dpi,
() -> pdfRenderer.renderImageWithDPI(pageNum, dpi));
image =
ExceptionUtils.handleOomRendering(
pageNum + 1,
dpi,
() -> pdfRenderer.renderImageWithDPI(pageNum, dpi));
// Invert the colors
invertImageColors(image);
// Invert the colors
invertImageColors(image);
// Create a new PDPage from the inverted image
PDPage pdPage = document.getPage(page);
File tempImageFile = null;
try {
tempImageFile = convertToBufferedImageTpFile(image);
PDImageXObject pdImage =
PDImageXObject.createFromFileByContent(tempImageFile, document);
// Create a new PDPage from the inverted image
PDPage pdPage = document.getPage(page);
File tempImageFile = null;
try {
tempImageFile = convertToBufferedImageTpFile(image);
PDImageXObject pdImage =
PDImageXObject.createFromFileByContent(tempImageFile, document);
PDPageContentStream contentStream =
new PDPageContentStream(
document,
pdPage,
PDPageContentStream.AppendMode.OVERWRITE,
true);
contentStream.drawImage(
pdImage,
0,
0,
pdPage.getMediaBox().getWidth(),
pdPage.getMediaBox().getHeight());
contentStream.close();
} finally {
if (tempImageFile != null && tempImageFile.exists()) {
Files.delete(tempImageFile.toPath());
try (PDPageContentStream contentStream =
new PDPageContentStream(
document,
pdPage,
PDPageContentStream.AppendMode.OVERWRITE,
true)) {
contentStream.drawImage(
pdImage,
0,
0,
pdPage.getMediaBox().getWidth(),
pdPage.getMediaBox().getHeight());
}
} finally {
if (tempImageFile != null && tempImageFile.exists()) {
Files.delete(tempImageFile.toPath());
}
}
}
}
// Save the modified PDF to a ByteArrayOutputStream
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
document.save(byteArrayOutputStream);
document.close();
// Save the modified PDF to a ByteArrayOutputStream
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
document.save(byteArrayOutputStream);
// Prepare the modified PDF for download
ByteArrayInputStream inputStream =
new ByteArrayInputStream(byteArrayOutputStream.toByteArray());
InputStreamResource resource = new InputStreamResource(inputStream);
return resource;
} finally {
if (file != null && file.exists()) {
Files.delete(file.toPath());
// Prepare the modified PDF for download
ByteArrayInputStream inputStream =
new ByteArrayInputStream(byteArrayOutputStream.toByteArray());
InputStreamResource resource = new InputStreamResource(inputStream);
return resource;
}
}
}
@ -117,18 +110,20 @@ public class InvertFullColorStrategy extends ReplaceAndInvertColorStrategy {
private void invertImageColors(BufferedImage image) {
int width = image.getWidth();
int height = image.getHeight();
for (int x = 0; x < width; x++) {
for (int y = 0; y < height; y++) {
int rgba = image.getRGB(x, y);
Color color = new Color(rgba, true);
Color invertedColor =
new Color(
255 - color.getRed(),
255 - color.getGreen(),
255 - color.getBlue());
image.setRGB(x, y, invertedColor.getRGB());
}
int[] pixels = new int[width * height];
image.getRGB(0, 0, width, height, pixels, 0, width);
for (int i = 0; i < pixels.length; i++) {
int pixel = pixels[i];
int a = 0xff;
int r = (pixel >> 16) & 0xff;
int g = (pixel >> 8) & 0xff;
int b = pixel & 0xff;
pixels[i] = (a << 24) | ((255 - r) << 16) | ((255 - g) << 8) | (255 - b);
}
image.setRGB(0, 0, width, height, pixels, 0, width);
}
// Helper method to convert BufferedImage to InputStream
@ -137,4 +132,23 @@ public class InvertFullColorStrategy extends ReplaceAndInvertColorStrategy {
ImageIO.write(image, "png", file);
return file;
}
private static class TempFile implements AutoCloseable {
private final File file;
public TempFile(File file) {
this.file = file;
}
public File getFile() {
return file;
}
@Override
public void close() throws IOException {
if (file != null && file.exists()) {
Files.delete(file.toPath());
}
}
}
}


@ -57,12 +57,20 @@ public class MergeController {
// Merges a list of PDDocument objects into a single PDDocument
public PDDocument mergeDocuments(List<PDDocument> documents) throws IOException {
PDDocument mergedDoc = pdfDocumentFactory.createNewDocument();
for (PDDocument doc : documents) {
for (PDPage page : doc.getPages()) {
mergedDoc.addPage(page);
boolean success = false;
try {
for (PDDocument doc : documents) {
for (PDPage page : doc.getPages()) {
mergedDoc.addPage(page);
}
}
success = true;
return mergedDoc;
} finally {
if (!success) {
mergedDoc.close();
}
}
return mergedDoc;
}
// Re-order files to match the explicit order provided by the front-end.
@ -363,9 +371,18 @@ public class MergeController {
// Save the modified document to a temporary file
outputTempFile = new TempFile(tempFileManager, ".pdf");
mergedDocument.save(outputTempFile.getFile());
try {
mergedDocument.save(outputTempFile.getFile());
} catch (Exception e) {
outputTempFile.close();
outputTempFile = null;
throw e;
}
}
} catch (Exception ex) {
if (outputTempFile != null) {
outputTempFile.close();
}
if (ex instanceof IOException && PdfErrorUtils.isCorruptedPdfError((IOException) ex)) {
log.warn("Corrupted PDF detected in merge pdf process: {}", ex.getMessage());
} else {


@ -1,6 +1,5 @@
package stirling.software.SPDF.controller.api;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Files;
@ -60,8 +59,6 @@ public class SplitPdfBySectionsController {
+ " Input:PDF Output:ZIP-PDF Type:SISO")
public ResponseEntity<StreamingResponseBody> splitPdf(
@ModelAttribute SplitPdfBySectionsRequest request) throws Exception {
List<ByteArrayOutputStream> splitDocumentsBoas = new ArrayList<>();
MultipartFile file = request.getFileInput();
String pageNumbers = request.getPageNumbers();
SplitTypes splitMode =
@ -69,55 +66,182 @@ public class SplitPdfBySectionsController {
.map(SplitTypes::valueOf)
.orElse(SplitTypes.SPLIT_ALL);
PDDocument sourceDocument = pdfDocumentFactory.load(file);
try (PDDocument sourceDocument = pdfDocumentFactory.load(file)) {
Set<Integer> pagesToSplit =
getPagesToSplit(pageNumbers, splitMode, sourceDocument.getNumberOfPages());
Set<Integer> pagesToSplit =
getPagesToSplit(pageNumbers, splitMode, sourceDocument.getNumberOfPages());
// Process the PDF based on split parameters
int horiz = request.getHorizontalDivisions() + 1;
int verti = request.getVerticalDivisions() + 1;
boolean merge = Boolean.TRUE.equals(request.getMerge());
String filename = GeneralUtils.generateFilename(file.getOriginalFilename(), "_split");
// Process the PDF based on split parameters
int horiz = request.getHorizontalDivisions() + 1;
int verti = request.getVerticalDivisions() + 1;
boolean merge = Boolean.TRUE.equals(request.getMerge());
List<PDDocument> splitDocuments = splitPdfPages(sourceDocument, verti, horiz, pagesToSplit);
String filename = GeneralUtils.generateFilename(file.getOriginalFilename(), "_split.pdf");
if (merge) {
TempFile tempFile = new TempFile(tempFileManager, ".pdf");
try (PDDocument merged = pdfService.mergeDocuments(splitDocuments);
OutputStream out = Files.newOutputStream(tempFile.getPath())) {
merged.save(out);
for (PDDocument d : splitDocuments) d.close();
sourceDocument.close();
}
return WebResponseUtils.pdfFileToWebResponse(tempFile, filename + "_split.pdf");
}
for (PDDocument doc : splitDocuments) {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
doc.save(baos);
doc.close();
splitDocumentsBoas.add(baos);
}
sourceDocument.close();
TempFile zipTempFile = new TempFile(tempFileManager, ".zip");
try (ZipOutputStream zipOut =
new ZipOutputStream(Files.newOutputStream(zipTempFile.getPath()))) {
int pageNum = 1;
for (int i = 0; i < splitDocumentsBoas.size(); i++) {
ByteArrayOutputStream baos = splitDocumentsBoas.get(i);
int sectionNum = (i % (horiz * verti)) + 1;
String fileName = filename + "_" + pageNum + "_" + sectionNum + ".pdf";
byte[] pdf = baos.toByteArray();
ZipEntry pdfEntry = new ZipEntry(fileName);
zipOut.putNextEntry(pdfEntry);
zipOut.write(pdf);
zipOut.closeEntry();
if (sectionNum == horiz * verti) pageNum++;
if (merge) {
TempFile tempFile = new TempFile(tempFileManager, ".pdf");
try (PDDocument mergedDoc = pdfDocumentFactory.createNewDocument();
OutputStream out = Files.newOutputStream(tempFile.getPath())) {
LayerUtility layerUtility = new LayerUtility(mergedDoc);
for (int pageIndex = 0;
pageIndex < sourceDocument.getNumberOfPages();
pageIndex++) {
if (pagesToSplit.contains(pageIndex)) {
addSplitPageToTarget(
sourceDocument,
pageIndex,
mergedDoc,
layerUtility,
horiz,
verti);
} else {
addPageToTarget(sourceDocument, pageIndex, mergedDoc, layerUtility);
}
}
mergedDoc.save(out);
}
return WebResponseUtils.pdfFileToWebResponse(tempFile, filename + ".pdf");
} else {
TempFile zipTempFile = new TempFile(tempFileManager, ".zip");
try (ZipOutputStream zipOut =
new ZipOutputStream(Files.newOutputStream(zipTempFile.getPath()))) {
for (int pageIndex = 0;
pageIndex < sourceDocument.getNumberOfPages();
pageIndex++) {
int pageNum = pageIndex + 1;
if (pagesToSplit.contains(pageIndex)) {
for (int i = 0; i < horiz; i++) {
for (int j = 0; j < verti; j++) {
try (PDDocument subDoc =
pdfDocumentFactory.createNewDocument()) {
LayerUtility subLayerUtility = new LayerUtility(subDoc);
addSingleSectionToTarget(
sourceDocument,
pageIndex,
subDoc,
subLayerUtility,
i,
j,
horiz,
verti);
int sectionNum = i * verti + j + 1;
String entryName =
filename
+ "_"
+ pageNum
+ "_"
+ sectionNum
+ ".pdf";
saveDocToZip(subDoc, zipOut, entryName);
}
}
}
} else {
try (PDDocument subDoc = pdfDocumentFactory.createNewDocument()) {
LayerUtility subLayerUtility = new LayerUtility(subDoc);
addPageToTarget(sourceDocument, pageIndex, subDoc, subLayerUtility);
String entryName = filename + "_" + pageNum + "_1.pdf";
saveDocToZip(subDoc, zipOut, entryName);
}
}
}
}
return WebResponseUtils.zipFileToWebResponse(zipTempFile, filename + ".zip");
}
}
return WebResponseUtils.zipFileToWebResponse(zipTempFile, filename + "_split.zip");
}
private void addPageToTarget(
PDDocument sourceDoc, int pageIndex, PDDocument targetDoc, LayerUtility layerUtility)
throws IOException {
PDPage sourcePage = sourceDoc.getPage(pageIndex);
PDPage newPage = new PDPage(sourcePage.getMediaBox());
targetDoc.addPage(newPage);
PDFormXObject form = layerUtility.importPageAsForm(sourceDoc, pageIndex);
try (PDPageContentStream contentStream =
new PDPageContentStream(targetDoc, newPage, AppendMode.APPEND, true, true)) {
contentStream.drawForm(form);
}
}
private void addSplitPageToTarget(
PDDocument sourceDoc,
int pageIndex,
PDDocument targetDoc,
LayerUtility layerUtility,
int totalHoriz,
int totalVert)
throws IOException {
PDPage sourcePage = sourceDoc.getPage(pageIndex);
PDRectangle mediaBox = sourcePage.getMediaBox();
float width = mediaBox.getWidth();
float height = mediaBox.getHeight();
float subPageWidth = width / totalHoriz;
float subPageHeight = height / totalVert;
PDFormXObject form = layerUtility.importPageAsForm(sourceDoc, pageIndex);
for (int i = 0; i < totalHoriz; i++) {
for (int j = 0; j < totalVert; j++) {
PDPage subPage = new PDPage(new PDRectangle(subPageWidth, subPageHeight));
targetDoc.addPage(subPage);
try (PDPageContentStream contentStream =
new PDPageContentStream(
targetDoc, subPage, AppendMode.APPEND, true, true)) {
float translateX = -subPageWidth * i;
float translateY = -subPageHeight * (totalVert - 1 - j);
contentStream.saveGraphicsState();
contentStream.addRect(0, 0, subPageWidth, subPageHeight);
contentStream.clip();
contentStream.transform(new Matrix(1, 0, 0, 1, translateX, translateY));
contentStream.drawForm(form);
contentStream.restoreGraphicsState();
}
}
}
}
private void addSingleSectionToTarget(
PDDocument sourceDoc,
int pageIndex,
PDDocument targetDoc,
LayerUtility layerUtility,
int horizIndex,
int vertIndex,
int totalHoriz,
int totalVert)
throws IOException {
PDPage sourcePage = sourceDoc.getPage(pageIndex);
PDRectangle mediaBox = sourcePage.getMediaBox();
float subPageWidth = mediaBox.getWidth() / totalHoriz;
float subPageHeight = mediaBox.getHeight() / totalVert;
PDPage subPage = new PDPage(new PDRectangle(subPageWidth, subPageHeight));
targetDoc.addPage(subPage);
PDFormXObject form = layerUtility.importPageAsForm(sourceDoc, pageIndex);
try (PDPageContentStream contentStream =
new PDPageContentStream(targetDoc, subPage, AppendMode.APPEND, true, true)) {
float translateX = -subPageWidth * horizIndex;
float translateY = -subPageHeight * (totalVert - 1 - vertIndex);
contentStream.saveGraphicsState();
contentStream.addRect(0, 0, subPageWidth, subPageHeight);
contentStream.clip();
contentStream.transform(new Matrix(1, 0, 0, 1, translateX, translateY));
contentStream.drawForm(form);
contentStream.restoreGraphicsState();
}
}
private void saveDocToZip(PDDocument doc, ZipOutputStream zipOut, String entryName)
throws IOException {
ZipEntry entry = new ZipEntry(entryName);
zipOut.putNextEntry(entry);
doc.save(zipOut);
zipOut.closeEntry();
}
// Based on the mode, get the pages that need to be split and return the pages set
@ -170,68 +294,4 @@ public class SplitPdfBySectionsController {
return pagesToSplit;
}
public List<PDDocument> splitPdfPages(
PDDocument document,
int horizontalDivisions,
int verticalDivisions,
Set<Integer> pagesToSplit)
throws IOException {
List<PDDocument> splitDocuments = new ArrayList<>();
int pageIndex = 0;
for (PDPage originalPage : document.getPages()) {
// If current page is not to split, add it to the splitDocuments directly.
if (!pagesToSplit.contains(pageIndex)) {
PDDocument newDoc = pdfDocumentFactory.createNewDocument();
newDoc.addPage(originalPage);
splitDocuments.add(newDoc);
} else {
// Otherwise, split current page.
PDRectangle originalMediaBox = originalPage.getMediaBox();
float width = originalMediaBox.getWidth();
float height = originalMediaBox.getHeight();
float subPageWidth = width / horizontalDivisions;
float subPageHeight = height / verticalDivisions;
LayerUtility layerUtility = new LayerUtility(document);
for (int i = 0; i < horizontalDivisions; i++) {
for (int j = 0; j < verticalDivisions; j++) {
PDDocument subDoc = new PDDocument();
PDPage subPage = new PDPage(new PDRectangle(subPageWidth, subPageHeight));
subDoc.addPage(subPage);
PDFormXObject form =
layerUtility.importPageAsForm(
document, document.getPages().indexOf(originalPage));
try (PDPageContentStream contentStream =
new PDPageContentStream(
subDoc, subPage, AppendMode.APPEND, true, true)) {
// Set clipping area and position
float translateX = -subPageWidth * i;
// float translateY = height - subPageHeight * (verticalDivisions - j);
float translateY = -subPageHeight * (verticalDivisions - 1 - j);
contentStream.saveGraphicsState();
contentStream.addRect(0, 0, subPageWidth, subPageHeight);
contentStream.clip();
contentStream.transform(new Matrix(1, 0, 0, 1, translateX, translateY));
// Draw the form
contentStream.drawForm(form);
contentStream.restoreGraphicsState();
}
splitDocuments.add(subDoc);
}
}
}
pageIndex++;
}
return splitDocuments;
}
}


@ -124,124 +124,168 @@ public class SplitPdfBySizeController {
throws IOException {
log.debug("Starting handleSplitBySize with maxBytes={}", maxBytes);
PDDocument currentDoc =
pdfDocumentFactory.createNewDocumentBasedOnOldDocument(sourceDocument);
int fileIndex = 1;
int totalPages = sourceDocument.getNumberOfPages();
int pageAdded = 0;
class DocHolder implements AutoCloseable {
private PDDocument doc;
// Smart size check frequency - check more often with larger documents
int baseCheckFrequency = 5;
public DocHolder(PDDocument doc) {
this.doc = doc;
}
for (int pageIndex = 0; pageIndex < totalPages; pageIndex++) {
PDPage page = sourceDocument.getPage(pageIndex);
log.debug("Processing page {} of {}", pageIndex + 1, totalPages);
public PDDocument getDoc() {
return doc;
}
// Add the page to current document
PDPage newPage = new PDPage(page.getCOSObject());
currentDoc.addPage(newPage);
pageAdded++;
// Dynamic size checking based on document size and page count
boolean shouldCheckSize =
(pageAdded % baseCheckFrequency == 0)
|| (pageIndex == totalPages - 1)
|| (pageAdded >= 20); // Always check after 20 pages
if (shouldCheckSize) {
log.debug("Performing size check after {} pages", pageAdded);
ByteArrayOutputStream checkSizeStream = new ByteArrayOutputStream();
currentDoc.save(checkSizeStream);
long actualSize = checkSizeStream.size();
log.debug("Current document size: {} bytes (max: {} bytes)", actualSize, maxBytes);
if (actualSize > maxBytes) {
// We exceeded the limit - remove the last page and save
if (currentDoc.getNumberOfPages() > 1) {
currentDoc.removePage(currentDoc.getNumberOfPages() - 1);
pageIndex--; // Process this page again in the next document
log.debug("Size limit exceeded - removed last page");
public void setDoc(PDDocument doc) {
if (this.doc != null) {
try {
this.doc.close();
} catch (IOException e) {
log.error("Error closing document", e);
}
}
this.doc = doc;
}
@Override
public void close() throws IOException {
if (doc != null) {
doc.close();
}
}
}
int fileIndex = 1;
try (DocHolder holder =
new DocHolder(
pdfDocumentFactory.createNewDocumentBasedOnOldDocument(sourceDocument))) {
int totalPages = sourceDocument.getNumberOfPages();
int pageAdded = 0;
// Smart size check frequency - check more often with larger documents
int baseCheckFrequency = 5;
for (int pageIndex = 0; pageIndex < totalPages; pageIndex++) {
PDPage page = sourceDocument.getPage(pageIndex);
log.debug("Processing page {} of {}", pageIndex + 1, totalPages);
// Add the page to current document
PDPage newPage = new PDPage(page.getCOSObject());
holder.getDoc().addPage(newPage);
pageAdded++;
// Dynamic size checking based on document size and page count
boolean shouldCheckSize =
(pageAdded % baseCheckFrequency == 0)
|| (pageIndex == totalPages - 1)
|| (pageAdded >= 20); // Always check after 20 pages
if (shouldCheckSize) {
log.debug("Performing size check after {} pages", pageAdded);
long actualSize;
try (ByteArrayOutputStream checkSizeStream = new ByteArrayOutputStream()) {
holder.getDoc().save(checkSizeStream);
actualSize = checkSizeStream.size();
}
log.debug(
"Saving document with {} pages as part {}",
currentDoc.getNumberOfPages(),
fileIndex);
saveDocumentToZip(currentDoc, zipOut, baseFilename, fileIndex++);
currentDoc = new PDDocument();
pageAdded = 0;
} else if (pageIndex < totalPages - 1) {
// We're under the limit, calculate if we might fit more pages
// Try to predict how many more similar pages might fit
if (actualSize < maxBytes * 0.75 && pageAdded > 0) {
// Rather than using a ratio, look ahead to test actual upcoming pages
int pagesToLookAhead = Math.min(5, totalPages - pageIndex - 1);
"Current document size: {} bytes (max: {} bytes)",
actualSize,
maxBytes);
if (pagesToLookAhead > 0) {
log.debug(
"Testing {} upcoming pages for potential addition",
pagesToLookAhead);
if (actualSize > maxBytes) {
// We exceeded the limit - remove the last page and save
if (holder.getDoc().getNumberOfPages() > 1) {
holder.getDoc().removePage(holder.getDoc().getNumberOfPages() - 1);
pageIndex--; // Process this page again in the next document
log.debug("Size limit exceeded - removed last page");
}
// Create a temp document with current pages + look-ahead pages
PDDocument testDoc = new PDDocument();
// First copy existing pages
for (int i = 0; i < currentDoc.getNumberOfPages(); i++) {
testDoc.addPage(new PDPage(currentDoc.getPage(i).getCOSObject()));
}
log.debug(
"Saving document with {} pages as part {}",
holder.getDoc().getNumberOfPages(),
fileIndex);
saveDocumentToZip(holder.getDoc(), zipOut, baseFilename, fileIndex++);
holder.setDoc(new PDDocument());
pageAdded = 0;
} else if (pageIndex < totalPages - 1) {
// We're under the limit, calculate if we might fit more pages
// Try to predict how many more similar pages might fit
if (actualSize < maxBytes * 0.75 && pageAdded > 0) {
// Rather than using a ratio, look ahead to test actual upcoming pages
int pagesToLookAhead = Math.min(5, totalPages - pageIndex - 1);
// Try adding look-ahead pages one by one
int extraPagesAdded = 0;
for (int i = 0; i < pagesToLookAhead; i++) {
int testPageIndex = pageIndex + 1 + i;
PDPage testPage = sourceDocument.getPage(testPageIndex);
testDoc.addPage(new PDPage(testPage.getCOSObject()));
if (pagesToLookAhead > 0) {
log.debug(
"Testing {} upcoming pages for potential addition",
pagesToLookAhead);
// Check if we're still under size
ByteArrayOutputStream testStream = new ByteArrayOutputStream();
testDoc.save(testStream);
long testSize = testStream.size();
// Create a temp document with current pages + look-ahead pages
try (PDDocument testDoc = new PDDocument()) {
// First copy existing pages
for (int i = 0; i < holder.getDoc().getNumberOfPages(); i++) {
testDoc.addPage(
new PDPage(
holder.getDoc().getPage(i).getCOSObject()));
}
if (testSize <= maxBytes) {
extraPagesAdded++;
log.debug(
"Test: Can add page {} (size would be {})",
testPageIndex + 1,
testSize);
} else {
log.debug(
"Test: Cannot add page {} (size would be {})",
testPageIndex + 1,
testSize);
break;
// Try adding look-ahead pages one by one
int extraPagesAdded = 0;
for (int i = 0; i < pagesToLookAhead; i++) {
int testPageIndex = pageIndex + 1 + i;
PDPage testPage = sourceDocument.getPage(testPageIndex);
testDoc.addPage(new PDPage(testPage.getCOSObject()));
// Check if we're still under size
long testSize;
try (ByteArrayOutputStream testStream =
new ByteArrayOutputStream()) {
testDoc.save(testStream);
testSize = testStream.size();
}
if (testSize <= maxBytes) {
extraPagesAdded++;
log.debug(
"Test: Can add page {} (size would be {})",
testPageIndex + 1,
testSize);
} else {
log.debug(
"Test: Cannot add page {} (size would be {})",
testPageIndex + 1,
testSize);
break;
}
}
// Add the pages we verified would fit
if (extraPagesAdded > 0) {
log.debug(
"Adding {} verified pages ahead", extraPagesAdded);
for (int i = 0; i < extraPagesAdded; i++) {
int extraPageIndex = pageIndex + 1 + i;
PDPage extraPage =
sourceDocument.getPage(extraPageIndex);
holder.getDoc()
.addPage(new PDPage(extraPage.getCOSObject()));
}
pageIndex += extraPagesAdded;
pageAdded += extraPagesAdded;
}
}
}
testDoc.close();
// Add the pages we verified would fit
if (extraPagesAdded > 0) {
log.debug("Adding {} verified pages ahead", extraPagesAdded);
for (int i = 0; i < extraPagesAdded; i++) {
int extraPageIndex = pageIndex + 1 + i;
PDPage extraPage = sourceDocument.getPage(extraPageIndex);
currentDoc.addPage(new PDPage(extraPage.getCOSObject()));
}
pageIndex += extraPagesAdded;
pageAdded += extraPagesAdded;
}
}
}
}
}
}
// Save final document if it has any pages
if (currentDoc.getNumberOfPages() > 0) {
log.debug(
"Saving final document with {} pages as part {}",
currentDoc.getNumberOfPages(),
fileIndex);
saveDocumentToZip(currentDoc, zipOut, baseFilename, fileIndex++);
// Save final document if it has any pages
if (holder.getDoc() != null && holder.getDoc().getNumberOfPages() > 0) {
log.debug(
"Saving final document with {} pages as part {}",
holder.getDoc().getNumberOfPages(),
fileIndex);
saveDocumentToZip(holder.getDoc(), zipOut, baseFilename, fileIndex++);
holder.setDoc(null);
}
}
log.debug("Completed handleSplitBySize with {} document parts created", fileIndex - 1);
@ -252,96 +296,103 @@ public class SplitPdfBySizeController {
throws IOException {
log.debug("Starting handleSplitByPageCount with pageCount={}", pageCount);
int currentPageCount = 0;
log.debug("Creating initial output document");
PDDocument currentDoc;
try {
currentDoc = pdfDocumentFactory.createNewDocumentBasedOnOldDocument(sourceDocument);
log.debug("Successfully created initial output document");
} catch (Exception e) {
ExceptionUtils.logException("initial output document creation", e);
throw ExceptionUtils.createFileProcessingException("split", e);
}
PDDocument currentDoc = null;
int fileIndex = 1;
int pageIndex = 0;
int totalPages = sourceDocument.getNumberOfPages();
log.debug("Processing {} pages", totalPages);
try {
for (PDPage page : sourceDocument.getPages()) {
pageIndex++;
log.debug("Processing page {} of {}", pageIndex, totalPages);
log.debug("Creating initial output document");
try {
currentDoc = pdfDocumentFactory.createNewDocumentBasedOnOldDocument(sourceDocument);
log.debug("Successfully created initial output document");
} catch (Exception e) {
ExceptionUtils.logException("initial output document creation", e);
throw ExceptionUtils.createFileProcessingException("split", e);
}
try {
log.debug("Adding page {} to current document", pageIndex);
currentDoc.addPage(page);
log.debug("Successfully added page {} to current document", pageIndex);
} catch (Exception e) {
log.error("Error adding page {} to current document", pageIndex, e);
throw ExceptionUtils.createFileProcessingException("split", e);
}
int pageIndex = 0;
int totalPages = sourceDocument.getNumberOfPages();
log.debug("Processing {} pages", totalPages);
currentPageCount++;
log.debug("Current page count: {}/{}", currentPageCount, pageCount);
if (currentPageCount == pageCount) {
log.debug(
"Reached target page count ({}), saving current document as part {}",
pageCount,
fileIndex);
try {
saveDocumentToZip(currentDoc, zipOut, baseFilename, fileIndex++);
log.debug("Successfully saved document part {}", fileIndex - 1);
} catch (Exception e) {
log.error("Error saving document part {}", fileIndex - 1, e);
throw e;
}
try {
for (PDPage page : sourceDocument.getPages()) {
pageIndex++;
log.debug("Processing page {} of {}", pageIndex, totalPages);
try {
log.debug("Creating new document for next part");
currentDoc = new PDDocument();
log.debug("Successfully created new document");
log.debug("Adding page {} to current document", pageIndex);
currentDoc.addPage(page);
log.debug("Successfully added page {} to current document", pageIndex);
} catch (Exception e) {
log.error("Error creating new document for next part", e);
log.error("Error adding page {} to current document", pageIndex, e);
throw ExceptionUtils.createFileProcessingException("split", e);
}
currentPageCount = 0;
log.debug("Reset current page count to 0");
}
}
} catch (Exception e) {
log.error("Error iterating through pages", e);
throw ExceptionUtils.createFileProcessingException("split", e);
}
currentPageCount++;
log.debug("Current page count: {}/{}", currentPageCount, pageCount);
// Add the last document if it contains any pages
try {
if (currentDoc.getPages().getCount() != 0) {
log.debug(
"Saving final document with {} pages as part {}",
currentDoc.getPages().getCount(),
fileIndex);
try {
saveDocumentToZip(currentDoc, zipOut, baseFilename, fileIndex++);
log.debug("Successfully saved final document part {}", fileIndex - 1);
} catch (Exception e) {
log.error("Error saving final document part {}", fileIndex - 1, e);
throw e;
if (currentPageCount == pageCount) {
log.debug(
"Reached target page count ({}), saving current document as part {}",
pageCount,
fileIndex);
try {
saveDocumentToZip(currentDoc, zipOut, baseFilename, fileIndex++);
currentDoc = null; // Document is closed by saveDocumentToZip
log.debug("Successfully saved document part {}", fileIndex - 1);
} catch (Exception e) {
log.error("Error saving document part {}", fileIndex - 1, e);
throw e;
}
try {
log.debug("Creating new document for next part");
currentDoc = new PDDocument();
log.debug("Successfully created new document");
} catch (Exception e) {
log.error("Error creating new document for next part", e);
throw ExceptionUtils.createFileProcessingException("split", e);
}
currentPageCount = 0;
log.debug("Reset current page count to 0");
}
}
} else {
log.debug("Final document has no pages, skipping");
}
} catch (Exception e) {
log.error("Error checking or saving final document", e);
throw ExceptionUtils.createFileProcessingException("split", e);
} finally {
try {
log.debug("Closing final document");
currentDoc.close();
log.debug("Successfully closed final document");
} catch (Exception e) {
log.error("Error closing final document", e);
log.error("Error iterating through pages", e);
throw ExceptionUtils.createFileProcessingException("split", e);
}
// Add the last document if it contains any pages
try {
if (currentDoc != null && currentDoc.getPages().getCount() != 0) {
log.debug(
"Saving final document with {} pages as part {}",
currentDoc.getPages().getCount(),
fileIndex);
try {
saveDocumentToZip(currentDoc, zipOut, baseFilename, fileIndex++);
currentDoc = null; // Document is closed by saveDocumentToZip
log.debug("Successfully saved final document part {}", fileIndex - 1);
} catch (Exception e) {
log.error("Error saving final document part {}", fileIndex - 1, e);
throw e;
}
} else {
log.debug("Final document has no pages, skipping");
}
} catch (Exception e) {
log.error("Error checking or saving final document", e);
throw ExceptionUtils.createFileProcessingException("split", e);
}
} finally {
if (currentDoc != null) {
try {
log.debug("Closing remaining document");
currentDoc.close();
log.debug("Successfully closed remaining document");
} catch (Exception e) {
log.error("Error closing remaining document", e);
}
}
}
@ -367,42 +418,52 @@ public class SplitPdfBySizeController {
for (int i = 0; i < documentCount; i++) {
log.debug("Creating document {} of {}", i + 1, documentCount);
PDDocument currentDoc;
PDDocument currentDoc = null;
try {
currentDoc = pdfDocumentFactory.createNewDocumentBasedOnOldDocument(sourceDocument);
log.debug("Successfully created document {} of {}", i + 1, documentCount);
int pagesToAdd = pagesPerDocument + (i < extraPages ? 1 : 0);
log.debug("Adding {} pages to document {}", pagesToAdd, i + 1);
for (int j = 0; j < pagesToAdd; j++) {
try {
log.debug(
"Adding page {} (index {}) to document {}",
j + 1,
currentPageIndex,
i + 1);
currentDoc.addPage(sourceDocument.getPage(currentPageIndex));
log.debug("Successfully added page {} to document {}", j + 1, i + 1);
currentPageIndex++;
} catch (Exception e) {
log.error("Error adding page {} to document {}", j + 1, i + 1, e);
throw ExceptionUtils.createFileProcessingException("split", e);
}
}
try {
log.debug("Saving document {} with {} pages", i + 1, pagesToAdd);
saveDocumentToZip(currentDoc, zipOut, baseFilename, fileIndex++);
// saveDocumentToZip closes the document
currentDoc = null;
log.debug("Successfully saved document {}", i + 1);
} catch (Exception e) {
log.error("Error saving document {}", i + 1, e);
throw e;
}
} catch (Exception e) {
log.error("Error creating document {} of {}", i + 1, documentCount, e);
throw ExceptionUtils.createFileProcessingException("split", e);
}
int pagesToAdd = pagesPerDocument + (i < extraPages ? 1 : 0);
log.debug("Adding {} pages to document {}", pagesToAdd, i + 1);
for (int j = 0; j < pagesToAdd; j++) {
try {
log.debug(
"Adding page {} (index {}) to document {}",
j + 1,
currentPageIndex,
i + 1);
currentDoc.addPage(sourceDocument.getPage(currentPageIndex));
log.debug("Successfully added page {} to document {}", j + 1, i + 1);
currentPageIndex++;
} catch (Exception e) {
log.error("Error adding page {} to document {}", j + 1, i + 1, e);
throw ExceptionUtils.createFileProcessingException("split", e);
} finally {
if (currentDoc != null) {
try {
currentDoc.close();
} catch (IOException e) {
log.error("Error closing document {} of {}", i + 1, documentCount, e);
}
}
}
try {
log.debug("Saving document {} with {} pages", i + 1, pagesToAdd);
saveDocumentToZip(currentDoc, zipOut, baseFilename, fileIndex++);
log.debug("Successfully saved document {}", i + 1);
} catch (Exception e) {
log.error("Error saving document {}", i + 1, e);
throw e;
}
}
log.debug("Completed handleSplitByDocCount with {} documents created", documentCount);
@ -414,24 +475,15 @@ public class SplitPdfBySizeController {
log.debug("Starting saveDocumentToZip for document part {}", index);
ByteArrayOutputStream outStream = new ByteArrayOutputStream();
try {
try (PDDocument doc = document) {
log.debug("Saving document part {} to byte array", index);
document.save(outStream);
doc.save(outStream);
log.debug("Successfully saved document part {} ({} bytes)", index, outStream.size());
} catch (Exception e) {
log.error("Error saving document part {} to byte array", index, e);
throw ExceptionUtils.createFileProcessingException("split", e);
}
try {
log.debug("Closing document part {}", index);
document.close();
log.debug("Successfully closed document part {}", index);
} catch (Exception e) {
log.error("Error closing document part {}", index, e);
// Continue despite close error
}
try {
// Create a new zip entry
String entryName = baseFilename + "_" + index + ".pdf";


@ -7,7 +7,6 @@ import java.io.IOException;
import org.apache.pdfbox.multipdf.LayerUtility;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.pdmodel.PDPage;
import org.apache.pdfbox.pdmodel.PDPageContentStream;
import org.apache.pdfbox.pdmodel.common.PDRectangle;
import org.apache.pdfbox.pdmodel.graphics.form.PDFormXObject;
import org.springframework.http.MediaType;
@ -47,53 +46,54 @@ public class ToSinglePageController {
throws IOException {
// Load the source document
PDDocument sourceDocument = pdfDocumentFactory.load(request);
try (PDDocument sourceDocument = pdfDocumentFactory.load(request)) {
// Calculate total height and max width
float totalHeight = 0;
float maxWidth = 0;
for (PDPage page : sourceDocument.getPages()) {
PDRectangle pageSize = page.getMediaBox();
totalHeight += pageSize.getHeight();
maxWidth = Math.max(maxWidth, pageSize.getWidth());
}
// Calculate total height and max width
float totalHeight = 0;
float maxWidth = 0;
for (PDPage page : sourceDocument.getPages()) {
PDRectangle pageSize = page.getMediaBox();
totalHeight += pageSize.getHeight();
maxWidth = Math.max(maxWidth, pageSize.getWidth());
// Create new document and page with calculated dimensions
try (PDDocument newDocument =
pdfDocumentFactory.createNewDocumentBasedOnOldDocument(sourceDocument)) {
PDPage newPage = new PDPage(new PDRectangle(maxWidth, totalHeight));
newDocument.addPage(newPage);
LayerUtility layerUtility = new LayerUtility(newDocument);
float yOffset = totalHeight;
// For each page, copy its content to the new page at the correct offset
try {
layerUtility.wrapInSaveRestore(newPage);
} catch (NullPointerException e) {
}
int pageIndex = 0;
for (PDPage page : sourceDocument.getPages()) {
PDFormXObject form = layerUtility.importPageAsForm(sourceDocument, pageIndex);
if (form != null) {
AffineTransform af =
AffineTransform.getTranslateInstance(
0, yOffset - page.getMediaBox().getHeight());
String defaultLayerName = "Layer" + pageIndex;
layerUtility.appendFormAsLayer(newPage, form, af, defaultLayerName);
}
yOffset -= page.getMediaBox().getHeight();
pageIndex++;
}
ByteArrayOutputStream baos = new ByteArrayOutputStream();
newDocument.save(baos);
byte[] result = baos.toByteArray();
return WebResponseUtils.bytesToWebResponse(
result,
GeneralUtils.generateFilename(
request.getFileInput().getOriginalFilename(), "_singlePage.pdf"));
}
}
// Create new document and page with calculated dimensions
PDDocument newDocument =
pdfDocumentFactory.createNewDocumentBasedOnOldDocument(sourceDocument);
PDPage newPage = new PDPage(new PDRectangle(maxWidth, totalHeight));
newDocument.addPage(newPage);
// Initialize the content stream of the new page
PDPageContentStream contentStream = new PDPageContentStream(newDocument, newPage);
contentStream.close();
LayerUtility layerUtility = new LayerUtility(newDocument);
float yOffset = totalHeight;
// For each page, copy its content to the new page at the correct offset
int pageIndex = 0;
for (PDPage page : sourceDocument.getPages()) {
PDFormXObject form = layerUtility.importPageAsForm(sourceDocument, pageIndex);
AffineTransform af =
AffineTransform.getTranslateInstance(
0, yOffset - page.getMediaBox().getHeight());
layerUtility.wrapInSaveRestore(newPage);
String defaultLayerName = "Layer" + pageIndex;
layerUtility.appendFormAsLayer(newPage, form, af, defaultLayerName);
yOffset -= page.getMediaBox().getHeight();
pageIndex++;
}
ByteArrayOutputStream baos = new ByteArrayOutputStream();
newDocument.save(baos);
newDocument.close();
sourceDocument.close();
byte[] result = baos.toByteArray();
return WebResponseUtils.bytesToWebResponse(
result,
GeneralUtils.generateFilename(
request.getFileInput().getOriginalFilename(), "_singlePage.pdf"));
}
}


@ -91,8 +91,7 @@ public class UIDataController {
LicensesData data = new LicensesData();
Resource resource = new ClassPathResource("static/3rdPartyLicenses.json");
try {
InputStream is = resource.getInputStream();
try (InputStream is = resource.getInputStream()) {
String json = new String(is.readAllBytes(), StandardCharsets.UTF_8);
ObjectMapper mapper = new ObjectMapper();
Map<String, List<Dependency>> licenseData =


@ -380,7 +380,7 @@ public class ConvertImgPDFController {
/**
* Rearranges the pages of the given PDF document based on the specified page order.
*
* @param pdfBytes The byte array of the original PDF file.
* @param pdfFile The MultipartFile of the original PDF file.
* @param pageOrderArr An array of page numbers indicating the new order.
* @return A byte array of the rearranged PDF.
* @throws IOException If an error occurs while processing the PDF.
@ -388,35 +388,31 @@ public class ConvertImgPDFController {
private byte[] rearrangePdfPages(MultipartFile pdfFile, String[] pageOrderArr)
throws IOException {
// Load the input PDF
PDDocument document = pdfDocumentFactory.load(pdfFile);
int totalPages = document.getNumberOfPages();
List<Integer> newPageOrder = GeneralUtils.parsePageList(pageOrderArr, totalPages, false);
try (PDDocument document = pdfDocumentFactory.load(pdfFile);
ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
int totalPages = document.getNumberOfPages();
List<Integer> newPageOrder =
GeneralUtils.parsePageList(pageOrderArr, totalPages, false);
// Create a new list to hold the pages in the new order
List<PDPage> newPages = new ArrayList<>();
for (int pageIndex : newPageOrder) {
newPages.add(document.getPage(pageIndex));
}
// Create a new list to hold the pages in the new order
List<PDPage> newPages = new ArrayList<>();
for (int pageIndex : newPageOrder) {
newPages.add(document.getPage(pageIndex));
}
// Remove all the pages from the original document
for (int i = document.getNumberOfPages() - 1; i >= 0; i--) {
document.removePage(i);
}
// Remove all the pages from the original document
for (int i = document.getNumberOfPages() - 1; i >= 0; i--) {
document.removePage(i);
}
// Add the pages in the new order
for (PDPage page : newPages) {
document.addPage(page);
}
// Add the pages in the new order
for (PDPage page : newPages) {
document.addPage(page);
}
// Convert PDDocument to byte array
byte[] newPdfBytes;
try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
// Convert PDDocument to byte array
document.save(baos);
newPdfBytes = baos.toByteArray();
} finally {
document.close();
return baos.toByteArray();
}
return newPdfBytes;
}
}


@ -1,5 +1,6 @@
package stirling.software.SPDF.controller.api.converters;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.net.URI;
import java.net.http.HttpClient;
@ -106,7 +107,6 @@ public class ConvertWebsiteToPDF {
Path tempOutputFile = null;
Path tempHtmlInput = null;
PDDocument doc = null;
try {
// Download the remote content first to ensure we don't allow dangerous schemes
String htmlContent = fetchRemoteHtml(URL);
@ -140,18 +140,14 @@ public class ConvertWebsiteToPDF {
.runCommandWithOutputHandling(command);
// Load the PDF using pdfDocumentFactory
doc = pdfDocumentFactory.load(tempOutputFile.toFile());
try (PDDocument doc = pdfDocumentFactory.load(tempOutputFile.toFile());
ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
// Convert URL to a safe filename
String outputFilename = convertURLToFileName(URL);
// Convert URL to a safe filename
String outputFilename = convertURLToFileName(URL);
ResponseEntity<byte[]> response =
WebResponseUtils.pdfDocToWebResponse(doc, outputFilename);
if (response == null) {
// Defensive fallback - should not happen but avoids null returns breaking tests
return ResponseEntity.ok(new byte[0]);
doc.save(baos);
return WebResponseUtils.baosToWebResponse(baos, outputFilename);
}
return response;
} finally {
if (tempHtmlInput != null) {
try {


@ -119,13 +119,9 @@ public class AutoSplitPdfController {
MultipartFile file = request.getFileInput();
boolean duplexMode = Boolean.TRUE.equals(request.getDuplexMode());
PDDocument document = null;
List<PDDocument> splitDocuments = new ArrayList<>();
TempFile outputTempFile = null;
try {
outputTempFile = new TempFile(tempFileManager, ".zip");
document = pdfDocumentFactory.load(file.getInputStream());
try (TempFile outputTempFile = new TempFile(tempFileManager, ".zip");
PDDocument document = pdfDocumentFactory.load(file.getInputStream())) {
PDFRenderer pdfRenderer = new PDFRenderer(document);
pdfRenderer.setSubsamplingAllowed(true);
@ -201,15 +197,7 @@ public class AutoSplitPdfController {
log.error("Error in auto split", e);
throw e;
} finally {
// Clean up resources
if (document != null) {
try {
document.close();
} catch (IOException e) {
log.error("Error closing main PDDocument", e);
}
}
// Clean up split documents
for (PDDocument splitDoc : splitDocuments) {
try {
splitDoc.close();
@ -217,10 +205,6 @@ public class AutoSplitPdfController {
log.error("Error closing split PDDocument", e);
}
}
if (outputTempFile != null) {
outputTempFile.close();
}
}
}
}


@ -53,18 +53,20 @@ public class BlankPageController {
// Convert to binary image based on the threshold
int whitePixels = 0;
int totalPixels = image.getWidth() * image.getHeight();
int width = image.getWidth();
int height = image.getHeight();
int[] pixels = new int[width * height];
for (int i = 0; i < image.getHeight(); i++) {
for (int j = 0; j < image.getWidth(); j++) {
int color = image.getRGB(j, i) & 0xFF;
if (color >= 255 - threshold) {
whitePixels++;
}
image.getRGB(0, 0, width, height, pixels, 0, width);
for (int pixel : pixels) {
int blue = pixel & 0xFF;
if (blue >= 255 - threshold) {
whitePixels++;
}
}
double whitePixelPercentage = (whitePixels / (double) totalPixels) * 100;
double whitePixelPercentage = (whitePixels / (double) (width * height)) * 100;
log.info(
String.format(
Locale.ROOT,


@ -77,38 +77,40 @@ public class PrintFileController {
log.info("Selected Printer: {}", selectedService.getName());
if (MediaType.APPLICATION_PDF_VALUE.equals(contentType)) {
PDDocument document = Loader.loadPDF(file.getBytes());
PrinterJob job = PrinterJob.getPrinterJob();
job.setPrintService(selectedService);
job.setPageable(new PDFPageable(document));
job.print();
document.close();
try (PDDocument document = Loader.loadPDF(file.getBytes())) {
PrinterJob job = PrinterJob.getPrinterJob();
job.setPrintService(selectedService);
job.setPageable(new PDFPageable(document));
job.print();
}
} else if (contentType.startsWith("image/")) {
BufferedImage image = ImageIO.read(file.getInputStream());
PrinterJob job = PrinterJob.getPrinterJob();
job.setPrintService(selectedService);
job.setPrintable(
new Printable() {
public int print(
Graphics graphics, PageFormat pageFormat, int pageIndex)
throws PrinterException {
if (pageIndex != 0) {
return NO_SUCH_PAGE;
try (var inputStream = file.getInputStream()) {
BufferedImage image = ImageIO.read(inputStream);
PrinterJob job = PrinterJob.getPrinterJob();
job.setPrintService(selectedService);
job.setPrintable(
new Printable() {
public int print(
Graphics graphics, PageFormat pageFormat, int pageIndex)
throws PrinterException {
if (pageIndex != 0) {
return NO_SUCH_PAGE;
}
Graphics2D g2d = (Graphics2D) graphics;
g2d.translate(
pageFormat.getImageableX(), pageFormat.getImageableY());
g2d.drawImage(
image,
0,
0,
(int) pageFormat.getImageableWidth(),
(int) pageFormat.getImageableHeight(),
null);
return PAGE_EXISTS;
}
Graphics2D g2d = (Graphics2D) graphics;
g2d.translate(
pageFormat.getImageableX(), pageFormat.getImageableY());
g2d.drawImage(
image,
0,
0,
(int) pageFormat.getImageableWidth(),
(int) pageFormat.getImageableHeight(),
null);
return PAGE_EXISTS;
}
});
job.print();
});
job.print();
}
}
return new ResponseEntity<>(
"File printed successfully to " + selectedService.getName(), HttpStatus.OK);


@ -133,13 +133,13 @@ public class CertSignController {
signature.setReason(reason);
signature.setSignDate(Calendar.getInstance()); // PDFBox requires Calendar
if (Boolean.TRUE.equals(showSignature)) {
SignatureOptions signatureOptions = new SignatureOptions();
signatureOptions.setVisualSignature(
instance.createVisibleSignature(doc, signature, pageNumber, showLogo));
signatureOptions.setPage(pageNumber);
doc.addSignature(signature, instance, signatureOptions);
try (SignatureOptions signatureOptions = new SignatureOptions()) {
signatureOptions.setVisualSignature(
instance.createVisibleSignature(doc, signature, pageNumber, showLogo));
signatureOptions.setPage(pageNumber);
doc.addSignature(signature, instance, signatureOptions);
}
} else {
doc.addSignature(signature, instance);
}


@ -45,8 +45,7 @@ public class HomeWebController {
public String licensesForm(Model model) {
model.addAttribute("currentPage", "licenses");
Resource resource = new ClassPathResource("static/3rdPartyLicenses.json");
try {
InputStream is = resource.getInputStream();
try (InputStream is = resource.getInputStream()) {
String json = new String(is.readAllBytes(), StandardCharsets.UTF_8);
ObjectMapper mapper = new ObjectMapper();
Map<String, List<Dependency>> data = mapper.readValue(json, new TypeReference<>() {});


@ -423,38 +423,38 @@ public class CertificateValidationService {
private void loadBundledMozillaCACerts() {
try {
log.info("Loading bundled Mozilla CA certificates from resources");
InputStream certStream =
getClass().getClassLoader().getResourceAsStream("certs/cacert.pem");
if (certStream == null) {
log.warn("Bundled Mozilla CA certificate file not found in resources");
return;
}
try (InputStream certStream =
getClass().getClassLoader().getResourceAsStream("certs/cacert.pem")) {
if (certStream == null) {
log.warn("Bundled Mozilla CA certificate file not found in resources");
return;
}
CertificateFactory cf = CertificateFactory.getInstance("X.509");
Collection<? extends Certificate> certs = cf.generateCertificates(certStream);
certStream.close();
CertificateFactory cf = CertificateFactory.getInstance("X.509");
Collection<? extends Certificate> certs = cf.generateCertificates(certStream);
int loadedCount = 0;
int skippedCount = 0;
int loadedCount = 0;
int skippedCount = 0;
for (Certificate cert : certs) {
if (cert instanceof X509Certificate x509) {
// Only add CA certificates to trust anchors
if (isCA(x509)) {
String fingerprint = sha256Fingerprint(x509);
String alias = "mozilla-" + fingerprint;
signingTrustAnchors.setCertificateEntry(alias, x509);
loadedCount++;
} else {
skippedCount++;
for (Certificate cert : certs) {
if (cert instanceof X509Certificate x509) {
// Only add CA certificates to trust anchors
if (isCA(x509)) {
String fingerprint = sha256Fingerprint(x509);
String alias = "mozilla-" + fingerprint;
signingTrustAnchors.setCertificateEntry(alias, x509);
loadedCount++;
} else {
skippedCount++;
}
}
}
}
log.info(
"Loaded {} Mozilla CA certificates as trust anchors (skipped {} non-CA certs)",
loadedCount,
skippedCount);
log.info(
"Loaded {} Mozilla CA certificates as trust anchors (skipped {} non-CA certs)",
loadedCount,
skippedCount);
}
} catch (Exception e) {
log.error("Failed to load bundled Mozilla CA certificates: {}", e.getMessage(), e);
}


@ -0,0 +1,118 @@
package stirling.software.SPDF.controller.api;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import org.apache.pdfbox.Loader;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.pdmodel.PDPage;
import org.apache.pdfbox.pdmodel.common.PDRectangle;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.api.io.TempDir;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.mock.web.MockMultipartFile;
import stirling.software.SPDF.model.api.general.SplitPdfBySizeOrCountRequest;
import stirling.software.common.service.CustomPDFDocumentFactory;
import stirling.software.common.util.TempFileManager;
@ExtendWith(MockitoExtension.class)
class SplitPdfBySizeControllerTest {
@TempDir Path tempDir;
@Mock private CustomPDFDocumentFactory pdfDocumentFactory;
@Mock private TempFileManager tempFileManager;
@InjectMocks private SplitPdfBySizeController controller;
@BeforeEach
void setUp() throws IOException {
when(tempFileManager.createTempFile(anyString()))
.thenAnswer(
invocation -> {
String suffix = invocation.getArgument(0);
return Files.createTempFile(tempDir, "test", suffix).toFile();
});
}
@Test
@DisplayName("Should split by page count successfully")
void shouldSplitByPageCount() throws Exception {
byte[] pdfBytes;
try (PDDocument doc = new PDDocument()) {
for (int i = 0; i < 5; i++) {
doc.addPage(new PDPage(PDRectangle.A4));
}
Path pdfPath = tempDir.resolve("input.pdf");
doc.save(pdfPath.toFile());
pdfBytes = Files.readAllBytes(pdfPath);
}
MockMultipartFile file =
new MockMultipartFile(
"fileInput", "input.pdf", MediaType.APPLICATION_PDF_VALUE, pdfBytes);
SplitPdfBySizeOrCountRequest request = new SplitPdfBySizeOrCountRequest();
request.setFileInput(file);
request.setSplitType(1); // Page count
request.setSplitValue("2");
when(pdfDocumentFactory.load(any(byte[].class)))
.thenAnswer(inv -> Loader.loadPDF((byte[]) inv.getArgument(0)));
when(pdfDocumentFactory.createNewDocumentBasedOnOldDocument(any(PDDocument.class)))
.thenAnswer(inv -> new PDDocument());
ResponseEntity<byte[]> response = controller.autoSplitPdf(request);
assertThat(response.getStatusCode()).isEqualTo(HttpStatus.OK);
assertThat(response.getBody()).isNotEmpty();
assertThat(response.getHeaders().getContentType())
.isEqualTo(MediaType.APPLICATION_OCTET_STREAM);
}
@Test
@DisplayName("Should split by document count successfully")
void shouldSplitByDocCount() throws Exception {
byte[] pdfBytes;
try (PDDocument doc = new PDDocument()) {
for (int i = 0; i < 6; i++) {
doc.addPage(new PDPage(PDRectangle.A4));
}
Path pdfPath = tempDir.resolve("input.pdf");
doc.save(pdfPath.toFile());
pdfBytes = Files.readAllBytes(pdfPath);
}
MockMultipartFile file =
new MockMultipartFile(
"fileInput", "input.pdf", MediaType.APPLICATION_PDF_VALUE, pdfBytes);
SplitPdfBySizeOrCountRequest request = new SplitPdfBySizeOrCountRequest();
request.setFileInput(file);
request.setSplitType(2); // Document count
request.setSplitValue("3"); // Split into 3 docs (2 pages each)
when(pdfDocumentFactory.load(any(byte[].class)))
.thenAnswer(inv -> Loader.loadPDF((byte[]) inv.getArgument(0)));
when(pdfDocumentFactory.createNewDocumentBasedOnOldDocument(any(PDDocument.class)))
.thenAnswer(inv -> new PDDocument());
ResponseEntity<byte[]> response = controller.autoSplitPdf(request);
assertThat(response.getStatusCode()).isEqualTo(HttpStatus.OK);
assertThat(response.getBody()).isNotEmpty();
}
}


@ -6,6 +6,7 @@ import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.when;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Method;
@ -189,9 +190,12 @@ public class ConvertWebsiteToPdfTest {
when(mockExec.runCommandWithOutputHandling(cmdCaptor.capture()))
.thenReturn(dummyResult);
// Mock WebResponseUtils
ResponseEntity<byte[]> fakeResponse = ResponseEntity.ok(new byte[0]);
wr.when(() -> WebResponseUtils.pdfDocToWebResponse(any(PDDocument.class), anyString()))
wr.when(
() ->
WebResponseUtils.baosToWebResponse(
any(ByteArrayOutputStream.class), any()))
.thenReturn(fakeResponse);
// Act
@ -261,7 +265,10 @@ public class ConvertWebsiteToPdfTest {
// WebResponseUtils
ResponseEntity<byte[]> fakeResponse = ResponseEntity.ok(new byte[0]);
wr.when(() -> WebResponseUtils.pdfDocToWebResponse(any(PDDocument.class), anyString()))
wr.when(
() ->
WebResponseUtils.baosToWebResponse(
any(ByteArrayOutputStream.class), any()))
.thenReturn(fakeResponse);
// Act: should not throw and should return a Response


@ -3,8 +3,6 @@ package stirling.software.proprietary.security.session;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.List;
import java.util.Optional;
@ -182,15 +180,7 @@ public class SessionPersistentRegistry implements SessionRegistry {
}
// Sort sessions by lastRequest in descending order
Collections.sort(
allSessions,
new Comparator<SessionEntity>() {
@Override
public int compare(SessionEntity s1, SessionEntity s2) {
// Sort by lastRequest in descending order
return s2.getLastRequest().compareTo(s1.getLastRequest());
}
});
allSessions.sort((s1, s2) -> s2.getLastRequest().compareTo(s1.getLastRequest()));
// The first session in the list is the latest session for the given principal name
return Optional.of(allSessions.get(0));