mirror of
https://github.com/blakeblackshear/frigate.git
synced 2026-03-07 02:18:07 +01:00
Miscellaneous Fixes (#20866)
* Don't warn when event ids have expired for trigger sync
* Import faster_whisper conditionally to avoid illegal instruction
* Catch OpenVINO runtime error
* Fix race condition in detail stream context: navigating between tracked objects in Explore would sometimes prevent the object track from appearing
* Handle case where classification images are deleted
* Adjust default rounded corners on larger screens
* Improve flow handling for classification state
* Remove images when wizard is cancelled
* Improve deletion handling for classes
* Set constraints on review buffers
* Update to support correct data format
* Set minimum duration for recording-based review items
* Use friendly name in review genai prompt

---------

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
This commit is contained in:
@@ -28,6 +28,7 @@ import {
|
||||
CustomClassificationModelConfig,
|
||||
FrigateConfig,
|
||||
} from "@/types/frigateConfig";
|
||||
import { ClassificationDatasetResponse } from "@/types/classification";
|
||||
import { getTranslatedLabel } from "@/utils/i18n";
|
||||
import { zodResolver } from "@hookform/resolvers/zod";
|
||||
import axios from "axios";
|
||||
@@ -140,16 +141,19 @@ export default function ClassificationModelEditDialog({
|
||||
});
|
||||
|
||||
// Fetch dataset to get current classes for state models
|
||||
const { data: dataset } = useSWR<{
|
||||
[id: string]: string[];
|
||||
}>(isStateModel ? `classification/${model.name}/dataset` : null, {
|
||||
revalidateOnFocus: false,
|
||||
});
|
||||
const { data: dataset } = useSWR<ClassificationDatasetResponse>(
|
||||
isStateModel ? `classification/${model.name}/dataset` : null,
|
||||
{
|
||||
revalidateOnFocus: false,
|
||||
},
|
||||
);
|
||||
|
||||
// Update form with classes from dataset when loaded
|
||||
useEffect(() => {
|
||||
if (isStateModel && dataset) {
|
||||
const classes = Object.keys(dataset).filter((key) => key !== "none");
|
||||
if (isStateModel && dataset?.categories) {
|
||||
const classes = Object.keys(dataset.categories).filter(
|
||||
(key) => key !== "none",
|
||||
);
|
||||
if (classes.length > 0) {
|
||||
(form as ReturnType<typeof useForm<StateFormData>>).setValue(
|
||||
"classes",
|
||||
|
||||
@@ -15,6 +15,7 @@ import Step3ChooseExamples, {
|
||||
} from "./wizard/Step3ChooseExamples";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { isDesktop } from "react-device-detect";
|
||||
import axios from "axios";
|
||||
|
||||
const OBJECT_STEPS = [
|
||||
"wizard.steps.nameAndDefine",
|
||||
@@ -120,7 +121,18 @@ export default function ClassificationModelWizardDialog({
|
||||
dispatch({ type: "PREVIOUS_STEP" });
|
||||
};
|
||||
|
||||
const handleCancel = () => {
|
||||
const handleCancel = async () => {
|
||||
// Clean up any generated training images if we're cancelling from Step 3
|
||||
if (wizardState.step1Data && wizardState.step3Data?.examplesGenerated) {
|
||||
try {
|
||||
await axios.delete(
|
||||
`/classification/${wizardState.step1Data.modelName}`,
|
||||
);
|
||||
} catch (error) {
|
||||
// Silently fail - user is already cancelling
|
||||
}
|
||||
}
|
||||
|
||||
dispatch({ type: "RESET" });
|
||||
onClose();
|
||||
};
|
||||
|
||||
@@ -165,18 +165,15 @@ export default function Step3ChooseExamples({
|
||||
const isLastClass = currentClassIndex === allClasses.length - 1;
|
||||
|
||||
if (isLastClass) {
|
||||
// Assign remaining unclassified images
|
||||
unknownImages.slice(0, 24).forEach((imageName) => {
|
||||
if (!newClassifications[imageName]) {
|
||||
// For state models with 2 classes, assign to the last class
|
||||
// For object models, assign to "none"
|
||||
if (step1Data.modelType === "state" && allClasses.length === 2) {
|
||||
newClassifications[imageName] = allClasses[allClasses.length - 1];
|
||||
} else {
|
||||
// For object models, assign remaining unclassified images to "none"
|
||||
// For state models, this should never happen since we require all images to be classified
|
||||
if (step1Data.modelType !== "state") {
|
||||
unknownImages.slice(0, 24).forEach((imageName) => {
|
||||
if (!newClassifications[imageName]) {
|
||||
newClassifications[imageName] = "none";
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// All done, trigger training immediately
|
||||
setImageClassifications(newClassifications);
|
||||
@@ -316,8 +313,15 @@ export default function Step3ChooseExamples({
|
||||
return images;
|
||||
}
|
||||
|
||||
return images.filter((img) => !imageClassifications[img]);
|
||||
}, [unknownImages, imageClassifications]);
|
||||
// If we're viewing a previous class (going back), show images for that class
|
||||
// Otherwise show only unclassified images
|
||||
const currentClassInView = allClasses[currentClassIndex];
|
||||
return images.filter((img) => {
|
||||
const imgClass = imageClassifications[img];
|
||||
// Show if: unclassified OR classified with current class we're viewing
|
||||
return !imgClass || imgClass === currentClassInView;
|
||||
});
|
||||
}, [unknownImages, imageClassifications, allClasses, currentClassIndex]);
|
||||
|
||||
const allImagesClassified = useMemo(() => {
|
||||
return unclassifiedImages.length === 0;
|
||||
@@ -326,15 +330,26 @@ export default function Step3ChooseExamples({
|
||||
// For state models on the last class, require all images to be classified
|
||||
const isLastClass = currentClassIndex === allClasses.length - 1;
|
||||
const canProceed = useMemo(() => {
|
||||
if (
|
||||
step1Data.modelType === "state" &&
|
||||
isLastClass &&
|
||||
!allImagesClassified
|
||||
) {
|
||||
return false;
|
||||
if (step1Data.modelType === "state" && isLastClass) {
|
||||
// Check if all 24 images will be classified after current selections are applied
|
||||
const totalImages = unknownImages.slice(0, 24).length;
|
||||
|
||||
// Count images that will be classified (either already classified or currently selected)
|
||||
const allImages = unknownImages.slice(0, 24);
|
||||
const willBeClassified = allImages.filter((img) => {
|
||||
return imageClassifications[img] || selectedImages.has(img);
|
||||
}).length;
|
||||
|
||||
return willBeClassified >= totalImages;
|
||||
}
|
||||
return true;
|
||||
}, [step1Data.modelType, isLastClass, allImagesClassified]);
|
||||
}, [
|
||||
step1Data.modelType,
|
||||
isLastClass,
|
||||
unknownImages,
|
||||
imageClassifications,
|
||||
selectedImages,
|
||||
]);
|
||||
|
||||
const handleBack = useCallback(() => {
|
||||
if (currentClassIndex > 0) {
|
||||
|
||||
@@ -12,13 +12,13 @@ export function ImageShadowOverlay({
|
||||
<>
|
||||
<div
|
||||
className={cn(
|
||||
"pointer-events-none absolute inset-x-0 top-0 z-10 h-[30%] w-full rounded-lg bg-gradient-to-b from-black/20 to-transparent md:rounded-2xl",
|
||||
"pointer-events-none absolute inset-x-0 top-0 z-10 h-[30%] w-full rounded-lg bg-gradient-to-b from-black/20 to-transparent",
|
||||
upperClassName,
|
||||
)}
|
||||
/>
|
||||
<div
|
||||
className={cn(
|
||||
"pointer-events-none absolute inset-x-0 bottom-0 z-10 h-[10%] w-full rounded-lg bg-gradient-to-t from-black/20 to-transparent md:rounded-2xl",
|
||||
"pointer-events-none absolute inset-x-0 bottom-0 z-10 h-[10%] w-full rounded-lg bg-gradient-to-t from-black/20 to-transparent",
|
||||
lowerClassName,
|
||||
)}
|
||||
/>
|
||||
|
||||
@@ -77,7 +77,10 @@ export default function BirdseyeLivePlayer({
|
||||
)}
|
||||
onClick={onClick}
|
||||
>
|
||||
<ImageShadowOverlay />
|
||||
<ImageShadowOverlay
|
||||
upperClassName="md:rounded-2xl"
|
||||
lowerClassName="md:rounded-2xl"
|
||||
/>
|
||||
<div className="size-full" ref={playerRef}>
|
||||
{player}
|
||||
</div>
|
||||
|
||||
@@ -331,7 +331,10 @@ export default function LivePlayer({
|
||||
>
|
||||
{cameraEnabled &&
|
||||
((showStillWithoutActivity && !liveReady) || liveReady) && (
|
||||
<ImageShadowOverlay />
|
||||
<ImageShadowOverlay
|
||||
upperClassName="md:rounded-2xl"
|
||||
lowerClassName="md:rounded-2xl"
|
||||
/>
|
||||
)}
|
||||
{player}
|
||||
{cameraEnabled &&
|
||||
|
||||
Reference in New Issue
Block a user