const Path = require('path')
const fs = require('../libs/fsExtra')
const stream = require('stream')
const Logger = require('../Logger')
const { resizeImage } = require('../utils/ffmpegHelpers')
const { encodeUriPath } = require('../utils/fileUtils')
const Database = require('../Database')

class CacheManager {
  constructor() {
    this.CachePath = null
    this.CoverCachePath = null
    this.ImageCachePath = null
    this.ItemCachePath = null
  }

  /**
   * Create cache directory paths if they don't exist
   */
  async ensureCachePaths() {
    // Creates cache paths if necessary
    this.CachePath = Path.join(global.MetadataPath, 'cache')
    this.CoverCachePath = Path.join(this.CachePath, 'covers')
    this.ImageCachePath = Path.join(this.CachePath, 'images')
    this.ItemCachePath = Path.join(this.CachePath, 'items')

    await fs.ensureDir(this.CachePath)
    await fs.ensureDir(this.CoverCachePath)
    await fs.ensureDir(this.ImageCachePath)
    await fs.ensureDir(this.ItemCachePath)
  }
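
  // For illustration only, the cache tree under global.MetadataPath ends up as:
  //   <MetadataPath>/cache/
  //     covers/   resized book cover images
  //     images/   resized author images
  //     items/    per-item cache files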

  async handleCoverCache(res, libraryItemId, options = {}) {
    const format = options.format || 'webp'
    const width = options.width || 400
    const height = options.height || null

    res.type(`image/${format}`)

    const cachePath = Path.join(this.CoverCachePath, `${libraryItemId}_${width}${height ? `x${height}` : ''}`) + '.' + format
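    // e.g. "<CoverCachePath>/<libraryItemId>_400.webp", or "..._400x600.webp" when a height is given
    // (placeholder id and dimensions). purgeEntityCache() relies on this "<entityId>_" filename prefix.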

    // Cache exists
    if (await fs.pathExists(cachePath)) {
      // Optional X-Accel offloading: with USE_X_ACCEL set (e.g. /protected), only an X-Accel-Redirect
      // header is returned and the Nginx reverse proxy serves the cached file itself instead of
      // streaming it through Express.
      if (global.XAccel) {
        const encodedURI = encodeUriPath(global.XAccel + cachePath)
        Logger.debug(`Use X-Accel to serve static file ${encodedURI}`)
        return res.status(204).header({ 'X-Accel-Redirect': encodedURI }).send()
      }
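
      // The matching Nginx configuration (from the X-Accel feature notes) maps the internal
      // redirect location back onto the filesystem:
      //   location /protected/ {
      //     internal;
      //     alias /;
      //   }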

      const r = fs.createReadStream(cachePath)
      const ps = new stream.PassThrough()
      stream.pipeline(r, ps, (err) => {
        if (err) {
          console.log(err)
          return res.sendStatus(500)
        }
      })
      return ps.pipe(res)
    }

    // Cached cover does not exist, generate it
    const coverPath = await Database.libraryItemModel.getCoverPath(libraryItemId)
    if (!coverPath || !(await fs.pathExists(coverPath))) {
      return res.sendStatus(404)
    }

    const writtenFile = await resizeImage(coverPath, cachePath, width, height)
    if (!writtenFile) return res.sendStatus(500)

    if (global.XAccel) {
      const encodedURI = encodeUriPath(global.XAccel + writtenFile)
      Logger.debug(`Use X-Accel to serve static file ${encodedURI}`)
      return res.status(204).header({ 'X-Accel-Redirect': encodedURI }).send()
    }

    var readStream = fs.createReadStream(writtenFile)
    readStream.pipe(res)
  }
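
  // Example wiring (hypothetical route; the real route path and auth middleware are defined elsewhere):
  //   router.get('/api/items/:id/cover', (req, res) => {
  //     const options = { format: req.query.format, width: Number(req.query.width) || undefined, height: Number(req.query.height) || undefined }
  //     return CacheManager.handleCoverCache(res, req.params.id, options)
  //   })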

  purgeCoverCache(libraryItemId) {
    return this.purgeEntityCache(libraryItemId, this.CoverCachePath)
  }

  purgeImageCache(entityId) {
    return this.purgeEntityCache(entityId, this.ImageCachePath)
  }

  async purgeEntityCache(entityId, cachePath) {
    return Promise.all(
      (await fs.readdir(cachePath)).reduce((promises, file) => {
        if (file.startsWith(entityId)) {
          Logger.debug(`[CacheManager] Going to purge ${file}`)
          promises.push(this.removeCache(Path.join(cachePath, file)))
        }
        return promises
      }, [])
    )
  }

  removeCache(path) {
    if (!path) return false
    return fs.pathExists(path).then((exists) => {
      if (!exists) return false
      return fs
        .unlink(path)
        .then(() => true)
        .catch((err) => {
          Logger.error(`[CacheManager] Failed to remove cache "${path}"`, err)
          return false
        })
    })
  }

  async purgeAll() {
    Logger.info(`[CacheManager] Purging all cache at "${this.CachePath}"`)
    if (await fs.pathExists(this.CachePath)) {
      await fs.remove(this.CachePath).catch((error) => {
        Logger.error(`[CacheManager] Failed to remove cache dir "${this.CachePath}"`, error)
      })
    }
    await this.ensureCachePaths()
  }

  async purgeItems() {
    Logger.info(`[CacheManager] Purging items cache at "${this.ItemCachePath}"`)
    if (await fs.pathExists(this.ItemCachePath)) {
      await fs.remove(this.ItemCachePath).catch((error) => {
        Logger.error(`[CacheManager] Failed to remove items cache dir "${this.ItemCachePath}"`, error)
      })
    }
    await this.ensureCachePaths()
  }

  /**
   * Serve an author image from the image cache, resizing and caching it on first request
   *
   * @param {import('express').Response} res
   * @param {String} authorId
   * @param {{ format?: string, width?: number, height?: number }} options
   * @returns
   */
  async handleAuthorCache(res, authorId, options = {}) {
    const format = options.format || 'webp'
    const width = options.width || 400
    const height = options.height || null

    res.type(`image/${format}`)

    var cachePath = Path.join(this.ImageCachePath, `${authorId}_${width}${height ? `x${height}` : ''}`) + '.' + format

    // Cache exists
    if (await fs.pathExists(cachePath)) {
      const r = fs.createReadStream(cachePath)
      const ps = new stream.PassThrough()
      stream.pipeline(r, ps, (err) => {
        if (err) {
          console.log(err)
          return res.sendStatus(500)
        }
      })
      return ps.pipe(res)
    }

    const author = await Database.authorModel.findByPk(authorId)
    if (!author || !author.imagePath || !(await fs.pathExists(author.imagePath))) {
      return res.sendStatus(404)
    }

    let writtenFile = await resizeImage(author.imagePath, cachePath, width, height)
    if (!writtenFile) return res.sendStatus(500)

    var readStream = fs.createReadStream(writtenFile)
    readStream.pipe(res)
  }
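
  // Note: cached author images are keyed by authorId, so purgeImageCache(authorId) removes every
  // cached size/format variant for that author (e.g. after the author image is updated).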
}
module.exports = new CacheManager()
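
// The module exports a shared singleton. A typical consumer (require path shown is illustrative):
//   const CacheManager = require('../managers/CacheManager')
//   await CacheManager.ensureCachePaths()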