Merge branch 'main' into main

commit 357db930fb
Calvin Li, 2025-07-14 15:03:06 -04:00, committed by GitHub
29 changed files with 2621 additions and 742 deletions


@ -119,7 +119,9 @@
"EditorConfig.EditorConfig", // EditorConfig support for maintaining consistent coding styles "EditorConfig.EditorConfig", // EditorConfig support for maintaining consistent coding styles
"ms-azuretools.vscode-docker", // Docker extension for Visual Studio Code "ms-azuretools.vscode-docker", // Docker extension for Visual Studio Code
"charliermarsh.ruff", // Ruff extension for Ruff language support "charliermarsh.ruff", // Ruff extension for Ruff language support
"github.vscode-github-actions" // GitHub Actions extension for Visual Studio Code "github.vscode-github-actions", // GitHub Actions extension for Visual Studio Code
"stylelint.vscode-stylelint", // Stylelint extension for CSS and SCSS linting
"redhat.vscode-yaml" // YAML extension for Visual Studio Code
] ]
} }
}, },


@ -2,37 +2,46 @@ version: 1
labels:
- label: "Bugfix"
-  title: '^fix:.*'
+  title: '^fix(\([^)]*\))?:|^fix:.*'
- label: "enhancement"
-  title: '^feat:.*'
+  title: '^feat(\([^)]*\))?:|^feat:.*'
- label: "build"
-  title: '^build:.*'
+  title: '^build(\([^)]*\))?:|^build:.*'
- label: "chore"
-  title: '^chore:.*'
+  title: '^chore(\([^)]*\))?:|^chore:.*'
- label: "ci"
-  title: '^ci:.*'
+  title: '^ci(\([^)]*\))?:|^ci:.*'
+- label: "ci"
+  title: '^.*\(ci\):.*'
- label: "perf"
-  title: '^perf:.*'
+  title: '^perf(\([^)]*\))?:|^perf:.*'
- label: "refactor"
-  title: '^refactor:.*'
+  title: '^refactor(\([^)]*\))?:|^refactor:.*'
- label: "revert"
-  title: '^revert:.*'
+  title: '^revert(\([^)]*\))?:|^revert:.*'
- label: "style"
-  title: '^style:.*'
+  title: '^style(\([^)]*\))?:|^style:.*'
- label: "Documentation"
-  title: '^docs:.*'
+  title: '^docs(\([^)]*\))?:|^docs:.*'
+- label: "dependencies"
+  title: '^deps(\([^)]*\))?:|^deps:.*'
+- label: "dependencies"
+  title: '^.*\(deps\):.*'
- label: 'API'
-  title: '.*openapi.*'
+  title: '.*openapi.*|.*swagger.*|.*api.*'
- label: 'Translation'
  files:
@ -81,6 +90,7 @@ labels:
  - 'stirling-pdf/src/main/java/stirling/software/SPDF/controller/web/MetricsController.java'
  - 'stirling-pdf/src/main/java/stirling/software/SPDF/controller/api/.*'
  - 'stirling-pdf/src/main/java/stirling/software/SPDF/model/api/.*'
+  - 'stirling-pdf/src/main/java/stirling/software/SPDF/service/ApiDocService.java'
  - 'proprietary/src/main/java/stirling/software/proprietary/security/controller/api/.*'
  - 'scripts/png_to_webp.py'
  - 'split_photos.py'
@ -116,6 +126,7 @@ labels:
  - '.pre-commit-config'
  - '.github/workflows/pre_commit.yml'
  - 'devGuide/.*'
+  - 'devTools/.*'
- label: 'Test'
  files:

.github/labels.yml (vendored, 3 changes)

@ -175,3 +175,6 @@
description: "This PR changes 1000+ lines ignoring generated files." description: "This PR changes 1000+ lines ignoring generated files."
- name: "to research" - name: "to research"
color: "FBCA04" color: "FBCA04"
- name: "pr-deployed"
color: "00FF00"
description: "Pull request has been deployed to a test environment"


@ -6,15 +6,13 @@ on:
permissions:
  contents: read
-  issues: write # Required for adding reactions to comments
-  pull-requests: read # Required for reading PR information
+  pull-requests: read
jobs:
  check-comment:
    runs-on: ubuntu-latest
    permissions:
      issues: write
-     pull-requests: read
    if: |
      github.event.issue.pull_request &&
      (
@ -47,10 +45,14 @@ jobs:
        with:
          egress-policy: audit
-     # Generate GitHub App token
-     - name: Generate GitHub App Token
-       id: generate-token
-       uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6
+     - name: Checkout PR
+       uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+     - name: Setup GitHub App Bot
+       if: github.actor != 'dependabot[bot]'
+       id: setup-bot
+       uses: ./.github/actions/setup-bot
+       continue-on-error: true
        with:
          app-id: ${{ secrets.GH_APP_ID }}
          private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
@ -123,7 +125,7 @@ jobs:
        id: add-eyes-reaction
        uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
        with:
-         github-token: ${{ steps.generate-token.outputs.token }}
+         github-token: ${{ steps.setup-bot.outputs.token }}
          script: |
            console.log(`Adding eyes reaction to comment ID: ${context.payload.comment.id}`);
            try {
@ -145,8 +147,8 @@ jobs:
    needs: check-comment
    runs-on: ubuntu-latest
    permissions:
-     contents: read
      issues: write
+     pull-requests: write
    steps:
      - name: Harden Runner
@ -154,9 +156,14 @@ jobs:
        with:
          egress-policy: audit
-     - name: Generate GitHub App Token
-       id: generate-token
-       uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6
+     - name: Checkout PR
+       uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+     - name: Setup GitHub App Bot
+       if: github.actor != 'dependabot[bot]'
+       id: setup-bot
+       uses: ./.github/actions/setup-bot
+       continue-on-error: true
        with:
          app-id: ${{ secrets.GH_APP_ID }}
          private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
@ -166,7 +173,7 @@ jobs:
        with:
          repository: ${{ needs.check-comment.outputs.pr_repository }}
          ref: ${{ needs.check-comment.outputs.pr_ref }}
-         token: ${{ secrets.GITHUB_TOKEN }}
+         token: ${{ steps.setup-bot.outputs.token }}
      - name: Set up JDK
        uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1
@ -188,12 +195,6 @@ jobs:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
-     - name: Get version number
-       id: versionNumber
-       run: |
-         VERSION=$(grep "^version =" build.gradle | awk -F'"' '{print $2}')
-         echo "versionNumber=$VERSION" >> $GITHUB_OUTPUT
      - name: Login to Docker Hub
        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
        with:
@ -297,7 +298,7 @@ jobs:
        if: success()
        uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
        with:
-         github-token: ${{ steps.generate-token.outputs.token }}
+         github-token: ${{ steps.setup-bot.outputs.token }}
          script: |
            console.log(`Adding rocket reaction to comment ID: ${{ needs.check-comment.outputs.comment_id }}`);
            try {
@ -313,11 +314,26 @@ jobs:
            console.error(error);
            }
+           // add label to PR
+           const prNumber = ${{ needs.check-comment.outputs.pr_number }};
+           try {
+             await github.rest.issues.addLabels({
+               owner: context.repo.owner,
+               repo: context.repo.repo,
+               issue_number: prNumber,
+               labels: ['pr-deployed']
+             });
+             console.log(`Added 'pr-deployed' label to PR #${prNumber}`);
+           } catch (error) {
+             console.error(`Failed to add label to PR: ${error.message}`);
+             console.error(error);
+           }
      - name: Add failure reaction to comment
        if: failure()
        uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
        with:
-         github-token: ${{ steps.generate-token.outputs.token }}
+         github-token: ${{ steps.setup-bot.outputs.token }}
          script: |
            console.log(`Adding -1 reaction to comment ID: ${{ needs.check-comment.outputs.comment_id }}`);
            try {
@ -337,7 +353,7 @@ jobs:
        if: success()
        uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
        with:
-         github-token: ${{ steps.generate-token.outputs.token }}
+         github-token: ${{ steps.setup-bot.outputs.token }}
          script: |
            const { GITHUB_REPOSITORY } = process.env;
            const [repoOwner, repoName] = GITHUB_REPOSITORY.split('/');
@ -357,3 +373,11 @@ jobs:
              issue_number: prNumber,
              body: commentBody
            });
+     - name: Cleanup temporary files
+       if: always()
+       run: |
+         echo "Cleaning up temporary files..."
+         rm -f ../private.key docker-compose.yml
+         echo "Cleanup complete."
+       continue-on-error: true


@ -1,7 +1,7 @@
name: PR Deployment cleanup
on:
-  pull_request:
+  pull_request_target:
    types: [opened, synchronize, reopened, closed]
permissions:
@ -13,11 +13,11 @@ env:
jobs:
  cleanup:
+   if: github.event.action == 'closed'
    runs-on: ubuntu-latest
    permissions:
-     contents: write
      pull-requests: write
-   if: github.event.action == 'closed'
+     issues: write
    steps:
      - name: Harden Runner
@ -25,13 +25,84 @@ jobs:
        with:
          egress-policy: audit
- name: Checkout PR
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Setup GitHub App Bot
if: github.actor != 'dependabot[bot]'
id: setup-bot
uses: ./.github/actions/setup-bot
continue-on-error: true
with:
app-id: ${{ secrets.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
- name: Remove 'pr-deployed' label if present
id: remove-label-comment
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
with:
github-token: ${{ steps.setup-bot.outputs.token }}
script: |
const prNumber = ${{ github.event.pull_request.number }};
const owner = context.repo.owner;
const repo = context.repo.repo;
// Fetch all labels currently on the PR
const { data: labels } = await github.rest.issues.listLabelsOnIssue({
owner,
repo,
issue_number: prNumber
});
const hasLabel = labels.some(label => label.name === 'pr-deployed');
if (hasLabel) {
console.log("Label 'pr-deployed' found. Removing...");
await github.rest.issues.removeLabel({
owner,
repo,
issue_number: prNumber,
name: 'pr-deployed'
});
} else {
console.log("Label 'pr-deployed' not found. Nothing to do.");
}
// Find existing comment
const comments = await github.rest.issues.listComments({
owner,
repo,
issue_number: prNumber
});
const deploymentComments = comments.data.filter(c =>
c.body?.includes("## 🚀 PR Test Deployment") &&
c.user?.type === "Bot"
);
if (deploymentComments.length > 0) {
for (const comment of deploymentComments) {
await github.rest.issues.deleteComment({
owner,
repo,
comment_id: comment.id
});
console.log(`Deleted deployment comment (ID: ${comment.id})`);
}
} else {
console.log("No matching deployment comments found.");
}
core.setOutput('present', hasLabel || deploymentComments.length > 0 ? 'true' : 'false');
      - name: Set up SSH
+       if: steps.remove-label-comment.outputs.present == 'true'
        run: |
          mkdir -p ~/.ssh/
          echo "${{ secrets.VPS_SSH_KEY }}" > ../private.key
          sudo chmod 600 ../private.key
      - name: Cleanup PR deployment
+       if: steps.remove-label-comment.outputs.present == 'true'
        id: cleanup
        run: |
          ssh -i ../private.key -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -T ${{ secrets.VPS_USERNAME }}@${{ secrets.VPS_HOST }} << 'ENDSSH'
@ -57,3 +128,11 @@ jobs:
echo "NO_CLEANUP_NEEDED" echo "NO_CLEANUP_NEEDED"
fi fi
ENDSSH ENDSSH
- name: Cleanup temporary files
if: always()
run: |
echo "Cleaning up temporary files..."
rm -f ../private.key
echo "Cleanup complete."
continue-on-error: true


@ -29,7 +29,7 @@ jobs:
      - uses: gradle/actions/setup-gradle@ac638b010cf58a27ee6c972d7336334ccaf61c96 # v4.4.1
      - name: Generate Swagger documentation
-       run: ./gradlew generateOpenApiDocs
+       run: ./gradlew :stirling-pdf:generateOpenApiDocs
      - name: Upload Swagger Documentation to SwaggerHub
        run: ./gradlew swaggerhubUpload


@ -17,5 +17,7 @@
"GitHub.vscode-pull-request-github", // GitHub Pull Requests extension for Visual Studio Code "GitHub.vscode-pull-request-github", // GitHub Pull Requests extension for Visual Studio Code
"charliermarsh.ruff", // Ruff code formatter for Python to follow the Ruff Style Guide "charliermarsh.ruff", // Ruff code formatter for Python to follow the Ruff Style Guide
"yzhang.markdown-all-in-one", // Markdown All-in-One extension for enhanced Markdown editing "yzhang.markdown-all-in-one", // Markdown All-in-One extension for enhanced Markdown editing
"stylelint.vscode-stylelint", // Stylelint extension for CSS and SCSS linting
"redhat.vscode-yaml", // YAML extension for Visual Studio Code
] ]
} }


@ -9,6 +9,9 @@
"[jsonc]": { "[jsonc]": {
"editor.defaultFormatter": "vscode.json-language-features" "editor.defaultFormatter": "vscode.json-language-features"
}, },
"[css]": {
"editor.defaultFormatter": "stylelint.vscode-stylelint"
},
"[json]": { "[json]": {
"editor.defaultFormatter": "vscode.json-language-features" "editor.defaultFormatter": "vscode.json-language-features"
}, },
@ -27,6 +30,9 @@
"[gradle]": { "[gradle]": {
"editor.defaultFormatter": "vscjava.vscode-gradle" "editor.defaultFormatter": "vscjava.vscode-gradle"
}, },
"[yaml]": {
"editor.defaultFormatter": "redhat.vscode-yaml"
},
"java.compile.nullAnalysis.mode": "automatic", "java.compile.nullAnalysis.mode": "automatic",
"java.configuration.updateBuildConfiguration": "interactive", "java.configuration.updateBuildConfiguration": "interactive",
"java.format.enabled": true, "java.format.enabled": true,
@ -119,6 +125,7 @@
"html.format.indentHandlebars": true, "html.format.indentHandlebars": true,
"html.format.preserveNewLines": true, "html.format.preserveNewLines": true,
"html.format.maxPreserveNewLines": 2, "html.format.maxPreserveNewLines": 2,
"stylelint.configFile": "devTools/.stylelintrc.json",
"java.project.sourcePaths": [ "java.project.sourcePaths": [
"stirling-pdf/src/main/java", "stirling-pdf/src/main/java",
"common/src/main/java", "common/src/main/java",


@ -135,7 +135,7 @@ Stirling-PDF currently supports 40 languages!
| Indonesian (Bahasa Indonesia) (id_ID) | ![63%](https://geps.dev/progress/63) |
| Irish (Gaeilge) (ga_IE) | ![70%](https://geps.dev/progress/70) |
| Italian (Italiano) (it_IT) | ![98%](https://geps.dev/progress/98) |
-| Japanese (日本語) (ja_JP) | ![70%](https://geps.dev/progress/70) |
+| Japanese (日本語) (ja_JP) | ![95%](https://geps.dev/progress/95) |
| Korean (한국어) (ko_KR) | ![69%](https://geps.dev/progress/69) |
| Norwegian (Norsk) (no_NB) | ![67%](https://geps.dev/progress/67) |
| Persian (فارسی) (fa_IR) | ![66%](https://geps.dev/progress/66) |
@ -145,7 +145,7 @@ Stirling-PDF currently supports 40 languages!
| Romanian (Română) (ro_RO) | ![59%](https://geps.dev/progress/59) |
| Russian (Русский) (ru_RU) | ![70%](https://geps.dev/progress/70) |
| Serbian Latin alphabet (Srpski) (sr_LATN_RS) | ![97%](https://geps.dev/progress/97) |
-| Simplified Chinese (简体中文) (zh_CN) | ![90%](https://geps.dev/progress/90) |
+| Simplified Chinese (简体中文) (zh_CN) | ![95%](https://geps.dev/progress/95) |
| Slovakian (Slovensky) (sk_SK) | ![53%](https://geps.dev/progress/53) |
| Slovenian (Slovenščina) (sl_SI) | ![73%](https://geps.dev/progress/73) |
| Spanish (Español) (es_ES) | ![75%](https://geps.dev/progress/75) |


@ -6,7 +6,7 @@ plugins {
id "org.springdoc.openapi-gradle-plugin" version "1.9.0" id "org.springdoc.openapi-gradle-plugin" version "1.9.0"
id "io.swagger.swaggerhub" version "1.3.2" id "io.swagger.swaggerhub" version "1.3.2"
id "edu.sc.seis.launch4j" version "3.0.6" id "edu.sc.seis.launch4j" version "3.0.6"
id "com.diffplug.spotless" version "7.0.4" id "com.diffplug.spotless" version "7.1.0"
id "com.github.jk1.dependency-license-report" version "2.9" id "com.github.jk1.dependency-license-report" version "2.9"
//id "nebula.lint" version "19.0.3" //id "nebula.lint" version "19.0.3"
id "org.panteleyev.jpackageplugin" version "1.7.3" id "org.panteleyev.jpackageplugin" version "1.7.3"
@ -161,6 +161,44 @@ subprojects {
tasks.named("processResources") { tasks.named("processResources") {
dependsOn(rootProject.tasks.writeVersion) dependsOn(rootProject.tasks.writeVersion)
} }
if (name == 'stirling-pdf') {
apply plugin: 'org.springdoc.openapi-gradle-plugin'
openApi {
apiDocsUrl = "http://localhost:8080/v1/api-docs"
outputDir = file("$projectDir")
outputFileName = "SwaggerDoc.json"
waitTimeInSeconds = 60 // Increase the wait time to 60 seconds
}
tasks.named("forkedSpringBootRun") {
dependsOn(":common:jar")
dependsOn(":proprietary:jar")
}
tasks.register("copySwaggerDoc", Copy) {
doNotTrackState("Writes SwaggerDoc.json to project root")
from(layout.projectDirectory.file("SwaggerDoc.json"))
into(rootProject.projectDir)
dependsOn("generateOpenApiDocs")
}
tasks.register("cleanSwaggerInBuild", Delete) {
doNotTrackState("Cleans up SwaggerDoc.json in build directory")
delete(layout.projectDirectory.file("SwaggerDoc.json"))
dependsOn("copySwaggerDoc")
}
tasks.named("copySwaggerDoc") {
finalizedBy("cleanSwaggerInBuild")
}
tasks.named("generateOpenApiDocs") {
finalizedBy("copySwaggerDoc")
doNotTrackState("OpenAPI plugin writes outside build directory")
}
}
}
tasks.withType(JavaCompile).configureEach {
@ -169,6 +207,7 @@ tasks.withType(JavaCompile).configureEach {
}
licenseReport {
+  projects = [project]
  renderers = [new JsonReportRenderer()]
  allowedLicensesFile = new File("$projectDir/allowed-licenses.json")
}
@ -204,13 +243,6 @@ sourceSets {
}
}
-openApi {
-  apiDocsUrl = "http://localhost:8080/v1/api-docs"
-  outputDir = file("$projectDir")
-  outputFileName = "SwaggerDoc.json"
-  waitTimeInSeconds = 60 // Increase the wait time to 60 seconds
-}
// Configure the forked spring boot run task to properly delegate to the stirling-pdf module
tasks.named('forkedSpringBootRun') {
  dependsOn ':stirling-pdf:bootRun'
@ -565,9 +597,6 @@ tasks.register('printMacVersion') {
}
}
-tasks.named('generateOpenApiDocs') {
-  doNotTrackState("Tracking state is not supported for this task")
-}
tasks.named('bootRun') {
  group = 'application'
  description = 'Delegates to :stirling-pdf:bootRun'


@ -21,7 +21,7 @@ dependencies {
api 'com.googlecode.owasp-java-html-sanitizer:owasp-java-html-sanitizer:20240325.1'
api 'com.fathzer:javaluator:3.0.6'
api 'com.posthog.java:posthog:1.2.0'
-api 'org.apache.commons:commons-lang3:3.17.0'
+api 'org.apache.commons:commons-lang3:3.18.0'
api 'com.drewnoakes:metadata-extractor:2.19.0' // Image metadata extractor
api 'com.vladsch.flexmark:flexmark-html2md-converter:0.64.8'
api "org.apache.pdfbox:pdfbox:$pdfboxVersion"


@ -43,6 +43,7 @@ public class AutoJobAspect {
// This aspect will run before any audit aspects due to @Order(0)
// Extract parameters from the request and annotation
boolean async = Boolean.parseBoolean(request.getParameter("async"));
+log.debug("AutoJobAspect: Processing {} {} with async={}", request.getMethod(), request.getRequestURI(), async);
long timeout = autoJobPostMapping.timeout();
int retryCount = autoJobPostMapping.retryCount();
boolean trackProgress = autoJobPostMapping.trackProgress();
@ -54,19 +55,8 @@ public class AutoJobAspect {
retryCount,
trackProgress);
-// Copy and process arguments
-// In a test environment, we might need to update the original objects for verification
-boolean isTestEnvironment = false;
-try {
-    isTestEnvironment = Class.forName("org.junit.jupiter.api.Test") != null;
-} catch (ClassNotFoundException e) {
-    // Not in a test environment
-}
-Object[] args =
-        isTestEnvironment
-                ? processArgsInPlace(joinPoint.getArgs(), async)
-                : copyAndProcessArgs(joinPoint.getArgs(), async);
+// Process arguments in-place to avoid type mismatch issues
+Object[] args = processArgsInPlace(joinPoint.getArgs(), async);
// Extract queueable and resourceWeight parameters and validate
boolean queueable = autoJobPostMapping.queueable();
@ -229,79 +219,10 @@ public class AutoJobAspect {
resourceWeight);
}
/**
* Creates deep copies of arguments when needed to avoid mutating the original objects
* Particularly important for PDFFile objects that might be reused by Spring
*
* @param originalArgs The original arguments
* @param async Whether this is an async operation
* @return A new array with safely processed arguments
*/
private Object[] copyAndProcessArgs(Object[] originalArgs, boolean async) {
if (originalArgs == null || originalArgs.length == 0) {
return originalArgs;
}
Object[] processedArgs = new Object[originalArgs.length];
// Copy all arguments
for (int i = 0; i < originalArgs.length; i++) {
Object arg = originalArgs[i];
if (arg instanceof PDFFile pdfFile) {
// Create a copy of PDFFile to avoid mutating the original
// Using direct property access instead of reflection for better performance
PDFFile pdfFileCopy = new PDFFile();
pdfFileCopy.setFileId(pdfFile.getFileId());
pdfFileCopy.setFileInput(pdfFile.getFileInput());
// Case 1: fileId is provided but no fileInput
if (pdfFileCopy.getFileInput() == null && pdfFileCopy.getFileId() != null) {
try {
log.debug("Using fileId {} to get file content", pdfFileCopy.getFileId());
MultipartFile file = fileStorage.retrieveFile(pdfFileCopy.getFileId());
pdfFileCopy.setFileInput(file);
} catch (Exception e) {
throw new RuntimeException(
"Failed to resolve file by ID: " + pdfFileCopy.getFileId(), e);
}
}
// Case 2: For async requests, we need to make a copy of the MultipartFile
else if (async && pdfFileCopy.getFileInput() != null) {
try {
log.debug("Making persistent copy of uploaded file for async processing");
MultipartFile originalFile = pdfFileCopy.getFileInput();
String fileId = fileStorage.storeFile(originalFile);
// Store the fileId for later reference
pdfFileCopy.setFileId(fileId);
// Replace the original MultipartFile with our persistent copy
MultipartFile persistentFile = fileStorage.retrieveFile(fileId);
pdfFileCopy.setFileInput(persistentFile);
log.debug("Created persistent file copy with fileId: {}", fileId);
} catch (IOException e) {
throw new RuntimeException(
"Failed to create persistent copy of uploaded file", e);
}
}
processedArgs[i] = pdfFileCopy;
} else {
// For non-PDFFile objects, just pass the original reference
// If other classes need copy-on-write, add them here
processedArgs[i] = arg;
}
}
return processedArgs;
}
/**
- * Processes arguments in-place for testing purposes This is similar to our original
- * implementation before introducing copy-on-write It's only used in test environments to
- * maintain test compatibility
+ * Processes arguments in-place to handle file resolution and async file persistence.
+ * This approach avoids type mismatch issues by modifying the original objects directly.
 *
 * @param originalArgs The original arguments
 * @param async Whether this is an async operation


@ -6,6 +6,8 @@ import java.util.Collections;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
+import com.fasterxml.jackson.annotation.JsonIgnore;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
@ -28,6 +30,7 @@ public class JobResult {
private String error;
/** List of result files for jobs that produce files */
+@JsonIgnore
private List<ResultFile> resultFiles;
/** Time when the job was created */


@ -1,6 +1,5 @@
package stirling.software.common.service;
-import io.github.pixee.security.ZipSecurity;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
@ -21,6 +20,8 @@ import org.springframework.http.MediaType;
import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;
+import io.github.pixee.security.ZipSecurity;
import jakarta.annotation.PreDestroy;
import lombok.extern.slf4j.Slf4j;
@ -361,7 +362,8 @@ public class TaskManager {
MultipartFile zipFile = fileStorage.retrieveFile(zipFileId);
try (ZipInputStream zipIn =
-        ZipSecurity.createHardenedInputStream(new ByteArrayInputStream(zipFile.getBytes()))) {
+        ZipSecurity.createHardenedInputStream(
+                new ByteArrayInputStream(zipFile.getBytes()))) {
    ZipEntry entry;
    while ((entry = zipIn.getNextEntry()) != null) {
        if (!entry.isDirectory()) {

devGuide/STYLELINT.md (new file, 47 lines)

@ -0,0 +1,47 @@
# STYLELINT.md
## Usage
Apply Stylelint to your project's CSS with the following steps:
1. **NPM Script**
- Go to directory: `devTools/`
- Add Stylelint & stylistic/stylelint-plugin
```bash
npm install --save-dev stylelint stylelint-config-standard
npm install --save-dev @stylistic/stylelint-plugin
```
- Add a script entry to your `package.json`:
```jsonc
{
"scripts": {
"lint:css:check": "stylelint \"../stirling-pdf/src/main/**/*.css\" \"../proprietary/src/main/resources/static/css/*.css\" --config .stylelintrc.json",
"lint:css:fix": "stylelint \"../stirling-pdf/src/main/**/*.css\" \"../proprietary/src/main/resources/static/css/*.css\" --config .stylelintrc.json --fix"
}
}
```
- Run the linter:
```bash
npm run lint:css:check
npm run lint:css:fix
```
2. **CLI Usage**
- Lint all CSS files:
```bash
npx stylelint ../stirling-pdf/src/main/**/*.css ../proprietary/src/main/resources/static/css/*.css
```
- Lint a single file:
```bash
npx stylelint ../proprietary/src/main/resources/static/css/audit-dashboard.css
```
- Apply automatic fixes:
```bash
npx stylelint "../stirling-pdf/src/main/**/*.css" "../proprietary/src/main/resources/static/css/*.css" --fix
```
For full configuration options and rule customization, refer to the official documentation: [https://stylelint.io](https://stylelint.io)


@ -0,0 +1,69 @@
{
"extends": [
"stylelint-config-standard"
],
"plugins": [
"@stylistic/stylelint-plugin"
],
"ignoreFiles": [
"stirling-pdf/src/main/resources/static/css/bootstrap*.css",
"stirling-pdf/src/main/resources/static/css/cookieconsent.css",
"stirling-pdf/src/main/resources/static/css/cookieconsentCustomisation.css",
"stirling-pdf/src/main/resources/static/css/prism.css",
"stirling-pdf/src/main/resources/static/pdfjs-legacy/**/*.css"
],
"rules": {
"property-no-vendor-prefix": null,
"value-no-vendor-prefix": null,
"selector-no-vendor-prefix": null,
"media-feature-name-no-vendor-prefix": null,
"value-keyword-case": null,
"color-function-notation": null,
"alpha-value-notation": null,
"color-function-alias-notation": null,
"selector-class-pattern": null,
"selector-id-pattern": null,
"declaration-block-no-redundant-longhand-properties": null,
"media-feature-range-notation": "prefix",
"selector-attribute-quotes": null,
"at-rule-no-vendor-prefix": null,
"selector-not-notation": null,
"no-duplicate-selectors": [
true,
{
"disableFix": true
}
],
"comment-word-disallowed-list": null,
"custom-property-pattern": null,
"no-descending-specificity": null,
"keyframes-name-pattern": null,
"comment-empty-line-before": [
"always",
{
"ignore": [
"stylelint-commands"
]
}
],
"block-no-empty": true,
"@stylistic/declaration-bang-space-after": "never",
"@stylistic/declaration-bang-space-before": "always",
"@stylistic/declaration-block-trailing-semicolon": "always",
"@stylistic/function-comma-space-after": [
"always-single-line",
{
"disableFix": false
}
],
"@stylistic/function-comma-space-before": "never",
"@stylistic/color-hex-case": "lower",
"@stylistic/declaration-block-semicolon-newline-after": "always",
"@stylistic/indentation": [
2,
{
"baseIndentLevel": 2
}
]
}
}

devTools/package-lock.json (generated, new file, 1598 lines; diff not shown because it is too large)

devTools/package.json (new file, 13 lines)

@ -0,0 +1,13 @@
{
"name": "stirling-pdf",
"version": "1.0.0",
"scripts": {
"lint:css:check": "stylelint \"../stirling-pdf/src/main/**/*.css\" \"../proprietary/src/main/resources/static/css/*.css\" --config .stylelintrc.json",
"lint:css:fix": "stylelint \"../stirling-pdf/src/main/**/*.css\" \"../proprietary/src/main/resources/static/css/*.css\" --config .stylelintrc.json --fix"
},
"devDependencies": {
"@stylistic/stylelint-plugin": "^3.1.3",
"stylelint": "^16.21.1",
"stylelint-config-standard": "^38.0.0"
}
}


@ -37,7 +37,7 @@ dependencies {
implementation 'org.thymeleaf.extras:thymeleaf-extras-springsecurity5:3.1.3.RELEASE'
api 'io.micrometer:micrometer-registry-prometheus'
-implementation 'com.unboundid.product.scim2:scim2-sdk-client:2.3.5'
+implementation 'com.unboundid.product.scim2:scim2-sdk-client:4.0.0'
runtimeOnly 'com.h2database:h2:2.3.232' // Don't upgrade h2database
runtimeOnly 'org.postgresql:postgresql:42.7.7'
constraints {


@ -529,7 +529,6 @@ ignore = [
[ja_JP]
ignore = [
-    'lang.jav',
    'language.direction',
]


@ -29,7 +29,8 @@ public class CleanUrlInterceptor implements HandlerInterceptor {
"type", "type",
"principal", "principal",
"startDate", "startDate",
"endDate"); "endDate",
"async");
@Override @Override
public boolean preHandle( public boolean preHandle(


@ -165,12 +165,6 @@
"moduleLicense": "Apache-2.0", "moduleLicense": "Apache-2.0",
"moduleLicenseUrl": "https://www.apache.org/licenses/LICENSE-2.0.txt" "moduleLicenseUrl": "https://www.apache.org/licenses/LICENSE-2.0.txt"
}, },
{
"moduleName": "com.google.errorprone:error_prone_annotations",
"moduleVersion": "2.11.0",
"moduleLicense": "Apache 2.0",
"moduleLicenseUrl": "http://www.apache.org/licenses/LICENSE-2.0.txt"
},
{ {
"moduleName": "com.google.errorprone:error_prone_annotations", "moduleName": "com.google.errorprone:error_prone_annotations",
"moduleUrl": "https://errorprone.info/error_prone_annotations", "moduleUrl": "https://errorprone.info/error_prone_annotations",
@ -639,13 +633,6 @@
"moduleLicense": "Apache License, Version 2.0", "moduleLicense": "Apache License, Version 2.0",
"moduleLicenseUrl": "https://www.apache.org/licenses/LICENSE-2.0.txt" "moduleLicenseUrl": "https://www.apache.org/licenses/LICENSE-2.0.txt"
}, },
{
"moduleName": "io.swagger.core.v3:swagger-annotations-jakarta",
"moduleUrl": "https://github.com/swagger-api/swagger-core/modules/swagger-annotations",
"moduleVersion": "2.2.30",
"moduleLicense": "Apache License, Version 2.0",
"moduleLicenseUrl": "https://www.apache.org/licenses/LICENSE-2.0"
},
{ {
"moduleName": "io.swagger.core.v3:swagger-annotations-jakarta", "moduleName": "io.swagger.core.v3:swagger-annotations-jakarta",
"moduleUrl": "https://github.com/swagger-api/swagger-core/modules/swagger-annotations", "moduleUrl": "https://github.com/swagger-api/swagger-core/modules/swagger-annotations",
@ -653,13 +640,6 @@
"moduleLicense": "Apache License, Version 2.0", "moduleLicense": "Apache License, Version 2.0",
"moduleLicenseUrl": "https://www.apache.org/licenses/LICENSE-2.0" "moduleLicenseUrl": "https://www.apache.org/licenses/LICENSE-2.0"
}, },
{
"moduleName": "io.swagger.core.v3:swagger-core-jakarta",
"moduleUrl": "https://github.com/swagger-api/swagger-core/modules/swagger-core",
"moduleVersion": "2.2.30",
"moduleLicense": "Apache License, Version 2.0",
"moduleLicenseUrl": "https://www.apache.org/licenses/LICENSE-2.0"
},
{ {
"moduleName": "io.swagger.core.v3:swagger-core-jakarta", "moduleName": "io.swagger.core.v3:swagger-core-jakarta",
"moduleUrl": "https://github.com/swagger-api/swagger-core/modules/swagger-core", "moduleUrl": "https://github.com/swagger-api/swagger-core/modules/swagger-core",
@ -667,13 +647,6 @@
"moduleLicense": "Apache License, Version 2.0", "moduleLicense": "Apache License, Version 2.0",
"moduleLicenseUrl": "https://www.apache.org/licenses/LICENSE-2.0" "moduleLicenseUrl": "https://www.apache.org/licenses/LICENSE-2.0"
}, },
{
"moduleName": "io.swagger.core.v3:swagger-models-jakarta",
"moduleUrl": "https://github.com/swagger-api/swagger-core/modules/swagger-models",
"moduleVersion": "2.2.30",
"moduleLicense": "Apache License, Version 2.0",
"moduleLicenseUrl": "https://www.apache.org/licenses/LICENSE-2.0"
},
{ {
"moduleName": "io.swagger.core.v3:swagger-models-jakarta", "moduleName": "io.swagger.core.v3:swagger-models-jakarta",
"moduleUrl": "https://github.com/swagger-api/swagger-core/modules/swagger-models", "moduleUrl": "https://github.com/swagger-api/swagger-core/modules/swagger-models",
@ -744,13 +717,6 @@
"moduleLicense": "GPL2 w/ CPE", "moduleLicense": "GPL2 w/ CPE",
"moduleLicenseUrl": "https://www.gnu.org/software/classpath/license.html" "moduleLicenseUrl": "https://www.gnu.org/software/classpath/license.html"
}, },
{
"moduleName": "jakarta.servlet:jakarta.servlet-api",
"moduleUrl": "https://www.eclipse.org",
"moduleVersion": "6.1.0",
"moduleLicense": "GPL2 w/ CPE",
"moduleLicenseUrl": "https://www.gnu.org/software/classpath/license.html"
},
{ {
"moduleName": "jakarta.transaction:jakarta.transaction-api", "moduleName": "jakarta.transaction:jakarta.transaction-api",
"moduleUrl": "https://projects.eclipse.org/projects/ee4j.jta", "moduleUrl": "https://projects.eclipse.org/projects/ee4j.jta",
@ -889,13 +855,6 @@
"moduleLicense": "Apache-2.0", "moduleLicense": "Apache-2.0",
"moduleLicenseUrl": "https://www.apache.org/licenses/LICENSE-2.0.txt" "moduleLicenseUrl": "https://www.apache.org/licenses/LICENSE-2.0.txt"
}, },
{
"moduleName": "org.apache.commons:commons-text",
"moduleUrl": "https://commons.apache.org/proper/commons-text",
"moduleVersion": "1.10.0",
"moduleLicense": "Apache License, Version 2.0",
"moduleLicenseUrl": "https://www.apache.org/licenses/LICENSE-2.0.txt"
},
{ {
"moduleName": "org.apache.commons:commons-text", "moduleName": "org.apache.commons:commons-text",
"moduleUrl": "https://commons.apache.org/proper/commons-text", "moduleUrl": "https://commons.apache.org/proper/commons-text",
@ -1018,13 +977,6 @@
"moduleLicense": "The Apache Software License, Version 2.0", "moduleLicense": "The Apache Software License, Version 2.0",
"moduleLicenseUrl": "https://www.apache.org/licenses/LICENSE-2.0.txt" "moduleLicenseUrl": "https://www.apache.org/licenses/LICENSE-2.0.txt"
}, },
{
"moduleName": "org.bouncycastle:bcpkix-jdk18on",
"moduleUrl": "https://www.bouncycastle.org/java.html",
"moduleVersion": "1.72",
"moduleLicense": "Bouncy Castle Licence",
"moduleLicenseUrl": "https://www.bouncycastle.org/licence.html"
},
{ {
"moduleName": "org.bouncycastle:bcpkix-jdk18on", "moduleName": "org.bouncycastle:bcpkix-jdk18on",
"moduleUrl": "https://www.bouncycastle.org/download/bouncy-castle-java/", "moduleUrl": "https://www.bouncycastle.org/download/bouncy-castle-java/",
@ -1039,13 +991,6 @@
"moduleLicense": "Bouncy Castle Licence", "moduleLicense": "Bouncy Castle Licence",
"moduleLicenseUrl": "https://www.bouncycastle.org/licence.html" "moduleLicenseUrl": "https://www.bouncycastle.org/licence.html"
}, },
{
"moduleName": "org.bouncycastle:bcutil-jdk18on",
"moduleUrl": "https://www.bouncycastle.org/java.html",
"moduleVersion": "1.72",
"moduleLicense": "Bouncy Castle Licence",
"moduleLicenseUrl": "https://www.bouncycastle.org/licence.html"
},
{ {
"moduleName": "org.bouncycastle:bcutil-jdk18on", "moduleName": "org.bouncycastle:bcutil-jdk18on",
"moduleUrl": "https://www.bouncycastle.org/download/bouncy-castle-java/", "moduleUrl": "https://www.bouncycastle.org/download/bouncy-castle-java/",
@ -1562,13 +1507,6 @@
"moduleLicense": "Apache License, Version 2.0", "moduleLicense": "Apache License, Version 2.0",
"moduleLicenseUrl": "https://www.apache.org/licenses/LICENSE-2.0" "moduleLicenseUrl": "https://www.apache.org/licenses/LICENSE-2.0"
}, },
{
"moduleName": "org.springframework.boot:spring-boot-devtools",
"moduleUrl": "https://spring.io/projects/spring-boot",
"moduleVersion": "3.5.3",
"moduleLicense": "Apache License, Version 2.0",
"moduleLicenseUrl": "https://www.apache.org/licenses/LICENSE-2.0"
},
{ {
"moduleName": "org.springframework.boot:spring-boot-starter", "moduleName": "org.springframework.boot:spring-boot-starter",
"moduleUrl": "https://spring.io/projects/spring-boot", "moduleUrl": "https://spring.io/projects/spring-boot",


@ -65,17 +65,23 @@ premium:
  key: 00000000-0000-0000-0000-000000000000
  enabled: false # Enable license key checks for pro/enterprise features
  proFeatures:
+    database: true # Enable database features
    SSOAutoLogin: false
    CustomMetadata:
-      autoUpdateMetadata: false # set to 'true' to automatically update metadata with below values
-      author: username # supports text such as 'John Doe' or types such as username to autopopulate with user's username
-      creator: Stirling-PDF # supports text such as 'Company-PDF'
-      producer: Stirling-PDF # supports text such as 'Company-PDF'
+      autoUpdateMetadata: false
+      author: username
+      creator: Stirling-PDF
+      producer: Stirling-PDF
    googleDrive:
      enabled: false
      clientId: ''
      apiKey: ''
      appId: ''
+  enterpriseFeatures:
+    audit:
+      enabled: true # Enable audit logging
+      level: 2 # Audit logging level: 0=OFF, 1=BASIC, 2=STANDARD, 3=VERBOSE
+      retentionDays: 90 # Number of days to retain audit logs
mail:
  enabled: false # set to 'true' to enable sending emails
@ -86,7 +92,7 @@ mail:
  from: '' # sender email address
legal:
-  termsAndConditions: https://www.stirlingpdf.com/terms # URL to the terms and conditions of your application (e.g. https://example.com/terms). Empty string to disable or filename to load from local file in static folder
+  termsAndConditions: https://www.stirlingpdf.com/terms-and-conditions # URL to the terms and conditions of your application (e.g. https://example.com/terms). Empty string to disable or filename to load from local file in static folder
  privacyPolicy: https://www.stirlingpdf.com/privacy-policy # URL to the privacy policy of your application (e.g. https://example.com/privacy). Empty string to disable or filename to load from local file in static folder
  accessibilityStatement: '' # URL to the accessibility statement of your application (e.g. https://example.com/accessibility). Empty string to disable or filename to load from local file in static folder
  cookiePolicy: '' # URL to the cookie policy of your application (e.g. https://example.com/cookie). Empty string to disable or filename to load from local file in static folder
@ -120,6 +126,15 @@ system:
    weasyprint: '' # Defaults to /opt/venv/bin/weasyprint
    unoconvert: '' # Defaults to /opt/venv/bin/unoconvert
  fileUploadLimit: '' # Defaults to "". No limit when string is empty. Set a number, between 0 and 999, followed by one of the following strings to set a limit. "KB", "MB", "GB".
+  tempFileManagement:
+    baseTmpDir: '' # Defaults to java.io.tmpdir/stirling-pdf
+    libreofficeDir: '' # Defaults to tempFileManagement.baseTmpDir/libreoffice
+    systemTempDir: '' # Only used if cleanupSystemTemp is true
+    prefix: stirling-pdf- # Prefix for temp file names
+    maxAgeHours: 24 # Maximum age in hours before temp files are cleaned up
+    cleanupIntervalMinutes: 30 # How often to run cleanup (in minutes)
+    startupCleanup: true # Clean up old temp files on startup
+    cleanupSystemTemp: false # Whether to clean broader system temp directory
ui:
  appName: '' # application's visible name
@ -150,6 +165,8 @@ processExecutor:
    weasyPrintSessionLimit: 16
    installAppSessionLimit: 1
    calibreSessionLimit: 1
+    ghostscriptSessionLimit: 8
+    ocrMyPdfSessionLimit: 2
  timeoutMinutes: # Process executor timeout in minutes
    libreOfficetimeoutMinutes: 30
    pdfToHtmltimeoutMinutes: 20
@ -158,3 +175,6 @@ processExecutor:
    installApptimeoutMinutes: 60
    calibretimeoutMinutes: 30
    tesseractTimeoutMinutes: 30
+    qpdfTimeoutMinutes: 30
+    ghostscriptTimeoutMinutes: 30
+    ocrMyPdfTimeoutMinutes: 30


@ -1,21 +1,25 @@
import os
def before_all(context):
    context.endpoint = None
    context.request_data = None
    context.files = {}
    context.response = None
def after_scenario(context, scenario):
-    if hasattr(context, 'files'):
+    if hasattr(context, "files"):
        for file in context.files.values():
            file.close()
-    if os.path.exists('response_file'):
-        os.remove('response_file')
-    if hasattr(context, 'file_name') and os.path.exists(context.file_name):
+    if os.path.exists("response_file"):
+        os.remove("response_file")
+    if hasattr(context, "file_name") and os.path.exists(context.file_name):
        os.remove(context.file_name)
    # Remove any temporary files
-    for temp_file in os.listdir('.'):
-        if temp_file.startswith('genericNonCustomisableName') or temp_file.startswith('temp_image_'):
+    for temp_file in os.listdir("."):
+        if temp_file.startswith("genericNonCustomisableName") or temp_file.startswith(
+            "temp_image_"
+        ):
            os.remove(temp_file)


@ -22,7 +22,7 @@ Feature: API Validation
| cleanFinal | true |
| ocrType | Normal |
| ocrRenderType | hocr |
-| removeImagesAfter| false |
+| removeImagesAfter | false |
When I send the API request to the endpoint "/api/v1/misc/ocr-pdf"
Then the response content type should be "application/pdf"
And the response file should have size greater than 0
@ -60,7 +60,7 @@ Feature: API Validation
| cleanFinal | true |
| ocrType | Force |
| ocrRenderType | hocr |
-| removeImagesAfter| false |
+| removeImagesAfter | false |
When I send the API request to the endpoint "/api/v1/misc/ocr-pdf"
Then the response content type should be "application/pdf"
And the response file should have size greater than 0
@ -164,6 +164,27 @@ Feature: API Validation
| presentation | odp | .odp |
| html | html | .zip |
@image @positive
Scenario Outline: Convert PDF to image
Given I generate a PDF file as "fileInput"
And the pdf contains 3 pages with random text
And the pdf contains 3 images of size 300x300 on 3 pages
And the request data includes
| parameter | value |
| dpi | 300 |
| imageFormat | <format> |
When I send the API request to the endpoint "/api/v1/convert/pdf/img"
Then the response status code should be 200
And the response file should have size greater than 100
And the response file should have extension ".zip"
Examples:
| format |
| webp |
| png |
| jpeg |
| jpg |
| gif |
@libre @positive @topdf
Scenario Outline: Convert PDF to various types
@ -227,4 +248,3 @@ Feature: API Validation
And the response file should have size greater than 200
And the response file should have extension ".zip"
And the response ZIP should contain 3 files


@ -24,29 +24,6 @@ Feature: API Validation
| 1 | 2 | 12 |
| 2 | 2 | 18 |
@split-pdf-by-sections @positive
Scenario Outline: split-pdf-by-sections with different parameters
Given I generate a PDF file as "fileInput"
And the pdf contains 2 pages
And the request data includes
| parameter | value |
| horizontalDivisions | <horizontalDivisions> |
| verticalDivisions | <verticalDivisions> |
| merge | true |
When I send the API request to the endpoint "/api/v1/general/split-pdf-by-sections"
Then the response content type should be "application/pdf"
And the response file should have size greater than 200
And the response status code should be 200
And the response PDF should contain <page_count> pages
Examples:
| horizontalDivisions | verticalDivisions | page_count |
| 0 | 1 | 4 |
| 1 | 1 | 8 |
| 1 | 2 | 12 |
| 2 | 2 | 18 |
@split-pdf-by-pages @positive
Scenario Outline: split-pdf-by-pages with different parameters
@ -70,7 +47,6 @@ Feature: API Validation
| 3n | 7 |
@split-pdf-by-size-or-count @positive
Scenario Outline: split-pdf-by-size-or-count with different parameters
Given I generate a PDF file as "fileInput"


@ -10,67 +10,67 @@ from reportlab.lib.pagesizes import letter
from reportlab.lib.utils import ImageReader
from reportlab.pdfgen import canvas
import mimetypes
-import requests
import zipfile
-import shutil
import re
from PIL import Image, ImageDraw
-API_HEADERS = {
-    'X-API-KEY': '123456789'
-}
+API_HEADERS = {"X-API-KEY": "123456789"}
#########
# GIVEN #
#########
@given('I generate a PDF file as "{fileInput}"')
def step_generate_pdf(context, fileInput):
    context.param_name = fileInput
    context.file_name = "genericNonCustomisableName.pdf"
    writer = PdfWriter()
    writer.add_blank_page(width=72, height=72) # Single blank page
-    with open(context.file_name, 'wb') as f:
+    with open(context.file_name, "wb") as f:
        writer.write(f)
-    if not hasattr(context, 'files'):
+    if not hasattr(context, "files"):
        context.files = {}
-    context.files[context.param_name] = open(context.file_name, 'rb')
+    context.files[context.param_name] = open(context.file_name, "rb")
@given('I use an example file at "{filePath}" as parameter "{fileInput}"')
def step_use_example_file(context, filePath, fileInput):
    context.param_name = fileInput
-    context.file_name = filePath.split('/')[-1]
-    if not hasattr(context, 'files'):
+    context.file_name = filePath.split("/")[-1]
+    if not hasattr(context, "files"):
        context.files = {}
    # Ensure the file exists before opening
    try:
-        example_file = open(filePath, 'rb')
+        example_file = open(filePath, "rb")
        context.files[context.param_name] = example_file
    except FileNotFoundError:
        raise FileNotFoundError(f"The example file '{filePath}' does not exist.")
-@given('the pdf contains {page_count:d} pages')
+@given("the pdf contains {page_count:d} pages")
def step_pdf_contains_pages(context, page_count):
    writer = PdfWriter()
    for i in range(page_count):
        writer.add_blank_page(width=72, height=72)
-    with open(context.file_name, 'wb') as f:
+    with open(context.file_name, "wb") as f:
        writer.write(f)
    context.files[context.param_name].close()
-    context.files[context.param_name] = open(context.file_name, 'rb')
+    context.files[context.param_name] = open(context.file_name, "rb")
# Duplicate for now...
-@given('the pdf contains {page_count:d} blank pages')
+@given("the pdf contains {page_count:d} blank pages")
def step_pdf_contains_blank_pages(context, page_count):
    writer = PdfWriter()
    for i in range(page_count):
        writer.add_blank_page(width=72, height=72)
-    with open(context.file_name, 'wb') as f:
+    with open(context.file_name, "wb") as f:
        writer.write(f)
    context.files[context.param_name].close()
-    context.files[context.param_name] = open(context.file_name, 'rb')
+    context.files[context.param_name] = open(context.file_name, "rb")
def create_black_box_image(file_name, size):
    can = canvas.Canvas(file_name, pagesize=size)
@ -80,14 +80,20 @@ def create_black_box_image(file_name, size):
    can.showPage()
    can.save()
-@given(u'the pdf contains {image_count:d} images of size {width:d}x{height:d} on {page_count:d} pages')
+@given(
+    "the pdf contains {image_count:d} images of size {width:d}x{height:d} on {page_count:d} pages"
+)
def step_impl(context, image_count, width, height, page_count):
    context.param_name = "fileInput"
    context.file_name = "genericNonCustomisableName.pdf"
-    create_pdf_with_images_and_boxes(context.file_name, image_count, page_count, width, height)
-    if not hasattr(context, 'files'):
+    create_pdf_with_images_and_boxes(
+        context.file_name, image_count, page_count, width, height
+    )
+    if not hasattr(context, "files"):
        context.files = {}
-    context.files[context.param_name] = open(context.file_name, 'rb')
+    context.files[context.param_name] = open(context.file_name, "rb")
def add_black_boxes_to_image(image):
    if isinstance(image, str):
@ -97,9 +103,14 @@ def add_black_boxes_to_image(image):
    draw.rectangle([(0, 0), image.size], fill=(0, 0, 0)) # Fill image with black
    return image
-def create_pdf_with_images_and_boxes(file_name, image_count, page_count, image_width, image_height):
+def create_pdf_with_images_and_boxes(
+    file_name, image_count, page_count, image_width, image_height
+):
    page_width, page_height = max(letter[0], image_width), max(letter[1], image_height)
-    boxes_per_page = image_count // page_count + (1 if image_count % page_count != 0 else 0)
+    boxes_per_page = image_count // page_count + (
+        1 if image_count % page_count != 0 else 0
+    )
    writer = PdfWriter()
    box_counter = 0
@@ -114,12 +125,14 @@ def create_pdf_with_images_and_boxes(file_name, image_count, page_count, image_w
            # Simulating a dynamic image creation (replace this with your actual image creation logic)
            # For demonstration, we'll create a simple black image
            dummy_image = Image.new(
                "RGB", (image_width, image_height), color="white"
            )  # Create a white image
            dummy_image = add_black_boxes_to_image(dummy_image)  # Add black boxes

            # Convert the PIL Image to bytes to pass to drawImage
            image_bytes = io.BytesIO()
            dummy_image.save(image_bytes, format="PNG")
            image_bytes.seek(0)

            # Check if the image fits in the current page dimensions
@@ -130,7 +143,9 @@ def create_pdf_with_images_and_boxes(file_name, image_count, page_count, image_w
                break

            # Add the image to the PDF
            can.drawImage(
                ImageReader(image_bytes), x, y, width=image_width, height=image_height
            )
            box_counter += 1

        can.showPage()
@@ -140,7 +155,7 @@ def create_pdf_with_images_and_boxes(file_name, image_count, page_count, image_w
        writer.add_page(new_pdf.pages[0])

    # Write the PDF to file
    with open(file_name, "wb") as f:
        writer.write(f)

    # Clean up temporary image files
@@ -149,36 +164,81 @@ def create_pdf_with_images_and_boxes(file_name, image_count, page_count, image_w
        if os.path.exists(temp_image_path):
            os.remove(temp_image_path)


@given("the pdf contains {image_count:d} images on {page_count:d} pages")
def step_pdf_contains_images(context, image_count, page_count):
    if not hasattr(context, "param_name"):
        context.param_name = "default"
        context.file_name = "genericNonCustomisableName.pdf"
    create_pdf_with_black_boxes(context.file_name, image_count, page_count)
    if not hasattr(context, "files"):
        context.files = {}
    if context.param_name in context.files:
        context.files[context.param_name].close()
    context.files[context.param_name] = open(context.file_name, "rb")


def create_pdf_with_black_boxes(file_name, image_count, page_count):
    page_width, page_height = letter
    writer = PdfWriter()
    box_counter = 0
    for page in range(page_count):
        packet = io.BytesIO()
        can = canvas.Canvas(packet, pagesize=(page_width, page_height))
        boxes_per_page = image_count // page_count + (
            1 if image_count % page_count != 0 else 0
        )
        for i in range(boxes_per_page):
            if box_counter >= image_count:
                break
            # Create a black box image
            dummy_image = Image.new("RGB", (100, 100), color="black")
            image_bytes = io.BytesIO()
            dummy_image.save(image_bytes, format="PNG")
            image_bytes.seek(0)
            x = (i % (page_width // 100)) * 100
            y = page_height - (((i % (page_height // 100)) + 1) * 100)
            if x + 100 > page_width or y < 0:
                break
            can.drawImage(ImageReader(image_bytes), x, y, width=100, height=100)
            box_counter += 1
        can.showPage()
        can.save()
        packet.seek(0)
        new_pdf = PdfReader(packet)
        writer.add_page(new_pdf.pages[0])
    with open(file_name, "wb") as f:
        writer.write(f)
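
# Layout notes derived from the code above: boxes_per_page is a ceiling division, so
# image_count=5 over page_count=2 gives 5 // 2 + 1 = 3 boxes on the first page and the
# remaining 2 on the second. Each 100x100 pt box is positioned from its index modulo
# the number of 100 pt columns (letter width 612 -> 6) and rows (height 792 -> 7), so
# successive boxes step across and down the page rather than filling a single row.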
@given("the pdf contains {page_count:d} pages with random text")
def step_pdf_contains_pages_with_random_text(context, page_count): def step_pdf_contains_pages_with_random_text(context, page_count):
buffer = io.BytesIO() buffer = io.BytesIO()
c = canvas.Canvas(buffer, pagesize=letter) c = canvas.Canvas(buffer, pagesize=letter)
width, height = letter width, height = letter
for _ in range(page_count): for _ in range(page_count):
text = ''.join(random.choices(string.ascii_letters + string.digits, k=100)) text = "".join(random.choices(string.ascii_letters + string.digits, k=100))
c.drawString(100, height - 100, text) c.drawString(100, height - 100, text)
c.showPage() c.showPage()
c.save() c.save()
with open(context.file_name, 'wb') as f: with open(context.file_name, "wb") as f:
f.write(buffer.getvalue()) f.write(buffer.getvalue())
context.files[context.param_name].close() context.files[context.param_name].close()
context.files[context.param_name] = open(context.file_name, 'rb') context.files[context.param_name] = open(context.file_name, "rb")
@given('the pdf pages all contain the text "{text}"') @given('the pdf pages all contain the text "{text}"')
def step_pdf_pages_contain_text(context, text): def step_pdf_pages_contain_text(context, text):
@@ -192,11 +252,12 @@ def step_pdf_pages_contain_text(context, text):
    c.save()
    with open(context.file_name, "wb") as f:
        f.write(buffer.getvalue())
    context.files[context.param_name].close()
    context.files[context.param_name] = open(context.file_name, "rb")


@given('the pdf is encrypted with password "{password}"')
def step_encrypt_pdf(context, password):
@@ -205,29 +266,34 @@ def step_encrypt_pdf(context, password):
    for i in range(len(reader.pages)):
        writer.add_page(reader.pages[i])
    writer.encrypt(password)
    with open(context.file_name, "wb") as f:
        writer.write(f)
    context.files[context.param_name].close()
    context.files[context.param_name] = open(context.file_name, "rb")


@given("the request data is")
def step_request_data(context):
    context.request_data = eval(context.text)
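
# Note on step_request_data above: eval() executes the scenario's doc string as
# arbitrary Python. If the payloads are plain dict/list literals (an assumption, not
# verified here), ast.literal_eval would be a safer drop-in.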


@given("the request data includes")
def step_request_data_table(context):
    context.request_data = {row["parameter"]: row["value"] for row in context.table}
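
# The comprehension above expects a two-column table headed 'parameter' and 'value',
# for example (hypothetical names and values):
#
#   Given the request data includes
#     | parameter     | value |
#     | someParameter | 42    |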


@given('save the generated PDF file as "{filename}" for debugging')
def save_generated_pdf(context, filename):
    with open(filename, "wb") as f:
        f.write(context.files[context.param_name].read())
    print(f"Saved generated PDF content to {filename}")


########
# WHEN #
########


@when('I send a GET request to "{endpoint}"')
def step_send_get_request(context, endpoint):
    base_url = "http://localhost:8080"
@@ -235,20 +301,22 @@ def step_send_get_request(context, endpoint):
    response = requests.get(full_url, headers=API_HEADERS)
    context.response = response


@when('I send a GET request to "{endpoint}" with parameters')
def step_send_get_request_with_params(context, endpoint):
    base_url = "http://localhost:8080"
    params = {row["parameter"]: row["value"] for row in context.table}
    full_url = f"{base_url}{endpoint}"
    response = requests.get(full_url, params=params, headers=API_HEADERS)
    context.response = response


@when('I send the API request to the endpoint "{endpoint}"')
def step_send_api_request(context, endpoint):
    url = f"http://localhost:8080{endpoint}"
    files = context.files if hasattr(context, "files") else {}

    if not hasattr(context, "request_data") or context.request_data is None:
        context.request_data = {}

    form_data = []
@@ -257,130 +325,173 @@ def step_send_api_request(context, endpoint):
    for key, file in files.items():
        mime_type, _ = mimetypes.guess_type(file.name)
        mime_type = mime_type or "application/octet-stream"
        print(f"form_data {file.name} with {mime_type}")
        form_data.append((key, (file.name, file, mime_type)))

    response = requests.post(url, files=form_data, headers=API_HEADERS)
    context.response = response
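
# Each (field_name, (filename, file_object, mime_type)) tuple appended above is passed
# to requests via the files= argument, which encodes the whole request as
# multipart/form-data; unknown extensions fall back to application/octet-stream.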


########
# THEN #
########


@then('the response content type should be "{content_type}"')
def step_check_response_content_type(context, content_type):
    actual_content_type = context.response.headers.get("Content-Type", "")
    assert actual_content_type.startswith(
        content_type
    ), f"Expected {content_type} but got {actual_content_type}. Response content: {context.response.content}"


@then("the response file should have size greater than {size:d}")
def step_check_response_file_size(context, size):
    response_file = io.BytesIO(context.response.content)
    assert len(response_file.getvalue()) > size


@then("the response PDF is not passworded")
def step_check_response_pdf_not_passworded(context):
    response_file = io.BytesIO(context.response.content)
    reader = PdfReader(response_file)
    assert not reader.is_encrypted


@then("the response PDF is passworded")
def step_check_response_pdf_passworded(context):
    response_file = io.BytesIO(context.response.content)
    try:
        reader = PdfReader(response_file)
        assert reader.is_encrypted
    except PdfReadError as e:
        raise AssertionError(
            f"Failed to read PDF: {str(e)}. Response content: {context.response.content}"
        )
    except Exception as e:
        raise AssertionError(
            f"An error occurred: {str(e)}. Response content: {context.response.content}"
        )


@then("the response status code should be {status_code:d}")
def step_check_response_status_code(context, status_code):
    assert (
        context.response.status_code == status_code
    ), f"Expected status code {status_code} but got {context.response.status_code}"


@then('the response should contain error message "{message}"')
def step_check_response_error_message(context, message):
    response_json = context.response.json()
    assert (
        response_json.get("error") == message
    ), f"Expected error message '{message}' but got '{response_json.get('error')}'"


@then('the response PDF metadata should include "{metadata_key}" as "{metadata_value}"')
def step_check_response_pdf_metadata(context, metadata_key, metadata_value):
    response_file = io.BytesIO(context.response.content)
    reader = PdfReader(response_file)
    metadata = reader.metadata
    assert (
        metadata.get("/" + metadata_key) == metadata_value
    ), f"Expected {metadata_key} to be '{metadata_value}' but got '{metadata.get(metadata_key)}'"


@then('the response file should have extension "{extension}"')
def step_check_response_file_extension(context, extension):
    content_disposition = context.response.headers.get("Content-Disposition", "")
    filename = ""
    if content_disposition:
        parts = content_disposition.split(";")
        for part in parts:
            if part.strip().startswith("filename"):
                filename = part.split("=")[1].strip().strip('"')
                break
    assert filename.endswith(
        extension
    ), f"Expected file extension {extension} but got {filename}. Response content: {context.response.content}"


@then('save the response file as "{filename}" for debugging')
def step_save_response_file(context, filename):
    with open(filename, "wb") as f:
        f.write(context.response.content)
    print(f"Saved response content to {filename}")


@then("the response PDF should contain {page_count:d} pages")
def step_check_response_pdf_page_count(context, page_count):
    response_file = io.BytesIO(context.response.content)
    reader = PdfReader(io.BytesIO(response_file.getvalue()))
    actual_page_count = len(reader.pages)
    assert (
        actual_page_count == page_count
    ), f"Expected {page_count} pages but got {actual_page_count} pages"


@then("the response ZIP should contain {file_count:d} files")
def step_check_response_zip_file_count(context, file_count):
    response_file = io.BytesIO(context.response.content)
    with zipfile.ZipFile(io.BytesIO(response_file.getvalue())) as zip_file:
        actual_file_count = len(zip_file.namelist())
    assert (
        actual_file_count == file_count
    ), f"Expected {file_count} files but got {actual_file_count} files"


@then(
    "the response ZIP file should contain {doc_count:d} documents each having {pages_per_doc:d} pages"
)
def step_check_response_zip_doc_page_count(context, doc_count, pages_per_doc):
    response_file = io.BytesIO(context.response.content)
    with zipfile.ZipFile(io.BytesIO(response_file.getvalue())) as zip_file:
        actual_doc_count = len(zip_file.namelist())
        assert (
            actual_doc_count == doc_count
        ), f"Expected {doc_count} documents but got {actual_doc_count} documents"
        for file_name in zip_file.namelist():
            with zip_file.open(file_name) as pdf_file:
                reader = PdfReader(pdf_file)
                actual_pages_per_doc = len(reader.pages)
                assert (
                    actual_pages_per_doc == pages_per_doc
                ), f"Expected {pages_per_doc} pages per document but got {actual_pages_per_doc} pages in document {file_name}"


@then('the JSON value of "{key}" should be "{expected_value}"')
def step_check_json_value(context, key, expected_value):
    actual_value = context.response.json().get(key)
    assert (
        actual_value == expected_value
    ), f"Expected JSON value for '{key}' to be '{expected_value}' but got '{actual_value}'"


@then(
    'JSON list entry containing "{identifier_key}" as "{identifier_value}" should have "{target_key}" as "{target_value}"'
)
def step_check_json_list_entry(
    context, identifier_key, identifier_value, target_key, target_value
):
    json_response = context.response.json()
    for entry in json_response:
        if entry.get(identifier_key) == identifier_value:
            assert (
                entry.get(target_key) == target_value
            ), f"Expected {target_key} to be {target_value} in entry where {identifier_key} is {identifier_value}, but found {entry.get(target_key)}"
            break
    else:
        raise AssertionError(
            f"No entry with {identifier_key} as {identifier_value} found"
        )


@then('the response should match the regex "{pattern}"')
def step_response_matches_regex(context, pattern):
    response_text = context.response.text
    assert re.match(
        pattern, response_text
    ), f"Response '{response_text}' does not match the expected pattern '{pattern}'"