Remove device config and use model size to configure device used (#14290)
* Remove device config and use model size to configure device used
* Don't show Frigate+ submission when in progress
* Add docs link for bounding box colors

Parent: 8a8a0c7dec · Commit: 6e332bbdf8
```diff
@@ -518,9 +518,8 @@ semantic_search:
   enabled: False
   # Optional: Re-index embeddings database from historical tracked objects (default: shown below)
   reindex: False
-  # Optional: Set device used to run embeddings, options are AUTO, CPU, GPU. (default: shown below)
-  device: "AUTO"
   # Optional: Set the model size used for embeddings. (default: shown below)
+  # NOTE: small model runs on CPU and large model runs on GPU
   model_size: "small"
 
   # Optional: Configuration for AI generated tracked object descriptions
```
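To see what the updated reference config implies in practice, here is a minimal sketch using plain PyYAML rather than Frigate's config loader; the snippet and the size-to-device rule are taken from this diff, everything else is illustrative:

```python
# Illustrative only: parse the new-style config and derive the device the way
# this commit does (model size decides the device; no explicit `device` key).
import yaml  # PyYAML

config_text = """
semantic_search:
  enabled: False
  reindex: False
  # NOTE: small model runs on CPU and large model runs on GPU
  model_size: "small"
"""

cfg = yaml.safe_load(config_text)["semantic_search"]
# Mirrors the change in Embeddings below: "GPU" if large, otherwise "CPU".
device = "GPU" if cfg["model_size"] == "large" else "CPU"
print(device)  # -> CPU
```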
````diff
@@ -29,25 +29,26 @@ If you are enabling the Search feature for the first time, be advised that Friga
 
 ### Jina AI CLIP
 
-:::tip
-
-The CLIP models are downloaded in ONNX format, which means they will be accelerated using GPU hardware when available. This depends on the Docker build that is used. See [the object detector docs](../configuration/object_detectors.md) for more information.
-
-:::
-
 The vision model is able to embed both images and text into the same vector space, which allows `image -> image` and `text -> image` similarity searches. Frigate uses this model on tracked objects to encode the thumbnail image and store it in the database. When searching for tracked objects via text in the search box, Frigate will perform a `text -> image` similarity search against this embedding. When clicking "Find Similar" in the tracked object detail pane, Frigate will perform an `image -> image` similarity search to retrieve the closest matching thumbnails.
 
 The text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Search page when clicking on the gray tracked object chip at the top left of each review item. See [the Generative AI docs](/configuration/genai.md) for more information on how to automatically generate tracked object descriptions.
 
 Differently weighted CLIP models are available and can be selected by setting the `model_size` config option:
 
+:::tip
+
+The CLIP models are downloaded in ONNX format, which means they will be accelerated using GPU hardware when available. This depends on the Docker build that is used. See [the object detector docs](../configuration/object_detectors.md) for more information.
+
+:::
+
 ```yaml
 semantic_search:
   enabled: True
   model_size: small
 ```
 
-Using `large` as the model size setting employs the full Jina model appropriate for high performance systems running a GPU. The `small` size uses a quantized version of the model that uses much less RAM and runs faster on CPU with a very negligible difference in embedding quality.
+- Configuring the `large` model employs the full Jina model and will automatically run on the GPU if applicable.
+- Configuring the `small` model employs a quantized version of the model that uses much less RAM and runs faster on CPU with a very negligible difference in embedding quality.
 
 ## Usage
 
````
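The docs above describe `text -> image` and `image -> image` searches over a shared vector space. As an illustration of that idea only, not Frigate's actual search code, a cosine-similarity lookup might look like this:

```python
# A minimal sketch of the similarity search described above: CLIP-style models
# map text and images into the same vector space, so one cosine-similarity
# routine serves both `text -> image` and `image -> image` queries.
import numpy as np

def cosine_similarity(query: np.ndarray, embeddings: np.ndarray) -> np.ndarray:
    """Return cosine similarity between one query vector and many stored ones."""
    query = query / np.linalg.norm(query)
    embeddings = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)
    return embeddings @ query

# Hypothetical data: 4 stored thumbnail embeddings and one text-query embedding.
rng = np.random.default_rng(0)
thumbnails = rng.normal(size=(4, 512))  # rows = tracked-object thumbnails
text_query = rng.normal(size=512)       # embedding of the search text
best = int(np.argmax(cosine_similarity(text_query, thumbnails)))
print(f"closest thumbnail index: {best}")
```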
```diff
@@ -12,7 +12,6 @@ class SemanticSearchConfig(FrigateBaseModel):
     reindex: Optional[bool] = Field(
         default=False, title="Reindex all detections on startup."
     )
-    device: str = Field(default="AUTO", title="Device Type")
     model_size: str = Field(
         default="small", title="The size of the embeddings model used."
    )
```
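For reference, the slimmed-down model can be sketched as standalone pydantic, assuming `FrigateBaseModel` behaves like a plain `BaseModel` here. Field names and the two titles shown in the diff are real; the `enabled` title is a guess:

```python
# Standalone sketch of the post-change config model (plain pydantic v2 here;
# Frigate's FrigateBaseModel is assumed to behave similarly). The `device`
# field is gone, so `model_size` alone controls hardware selection.
from typing import Optional
from pydantic import BaseModel, ConfigDict, Field

class SemanticSearchConfig(BaseModel):
    # Silence pydantic v2's protected-namespace warning for `model_size`.
    model_config = ConfigDict(protected_namespaces=())

    enabled: bool = Field(default=False, title="Enable semantic search.")
    reindex: Optional[bool] = Field(
        default=False, title="Reindex all detections on startup."
    )
    model_size: str = Field(
        default="small", title="The size of the embeddings model used."
    )

print(SemanticSearchConfig().model_size)  # -> small
```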
```diff
@@ -127,7 +127,7 @@ class Embeddings:
             model_size=config.model_size,
             model_type="vision",
             requestor=self.requestor,
-            device=self.config.device,
+            device="GPU" if config.model_size == "large" else "CPU",
         )
 
     def upsert_thumbnail(self, event_id: str, thumbnail: bytes):
```
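The behavioral change in this hunk fits in one expression. Restated as a tiny pure function (a hypothetical helper, not in the codebase) with the edge case spelled out:

```python
# The new device policy from the diff line above, as a testable function.
def device_for(model_size: str) -> str:
    """large -> GPU; small (and anything else) -> CPU."""
    return "GPU" if model_size == "large" else "CPU"

assert device_for("large") == "GPU"
assert device_for("small") == "CPU"
assert device_for("medium") == "CPU"  # unknown sizes also fall back to CPU
```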
```diff
@@ -554,7 +554,7 @@ function ObjectSnapshotTab({
           </div>
 
           <div className="flex flex-row justify-center gap-2 md:justify-end">
-            {state == "reviewing" && (
+            {state == "reviewing" && search.end_time && (
              <>
                <Button
                  className="bg-success"
```
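This hunk is the "Don't show Frigate+ submission when in progress" fix: the buttons now also require `search.end_time`, i.e. the tracked object must have ended. A hypothetical restatement of the guard (Python for consistency with the other sketches; note the JSX checks truthiness rather than `None`):

```python
# Hypothetical restatement of the JSX guard: the Frigate+ submission buttons
# render only while reviewing AND once the tracked object has an end time.
from typing import Optional

def show_submission_buttons(state: str, end_time: Optional[float]) -> bool:
    return state == "reviewing" and end_time is not None

assert show_submission_buttons("reviewing", 1730000000.0) is True
assert show_submission_buttons("reviewing", None) is False  # still in progress
```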
```diff
@@ -16,6 +16,8 @@ import useDeepMemo from "@/hooks/use-deep-memo";
 import { Card } from "@/components/ui/card";
 import { getIconForLabel } from "@/utils/iconUtil";
 import { capitalizeFirstLetter } from "@/utils/stringUtil";
+import { Link } from "react-router-dom";
+import { LuExternalLink } from "react-icons/lu";
 
 type ObjectSettingsViewProps = {
   selectedCamera?: string;
```
```diff
@@ -134,6 +136,17 @@ export default function ObjectSettingsView({
             statistics. The object list shows a time-delayed summary of detected
             objects.
           </p>
+          <div className="flex items-center text-primary">
+            <Link
+              to="https://docs.frigate.video/frigate/glossary#bounding-box-colors"
+              target="_blank"
+              rel="noopener noreferrer"
+              className="inline"
+            >
+              Read the meaning of bounding box colors
+              <LuExternalLink className="ml-2 inline-flex size-3" />
+            </Link>
+          </div>
         </div>
 
         <Tabs defaultValue="debug" className="w-full">
```