Mirror of https://github.com/blakeblackshear/frigate.git
	move zone config under each camera
This commit is contained in:
parent 005e188d38
commit fdc8bbf72d
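This change removes the top-level `zones:` section from the example config and nests zone definitions under each camera instead; `TrackedObjectProcessor` now reads zones straight from the camera config rather than taking a separate `zone_config` argument. For orientation, a minimal sketch of the new layout (camera, zone, and filter names are taken from the example config in the diff below; the snippet itself is illustrative and not part of the commit):

```python
import yaml

# Zones now live under the camera they belong to, not at the top level.
NEW_LAYOUT = """
cameras:
  back:
    zones:
      front_steps:
        coordinates:
          - 545,1077
          - 747,939
          - 788,805
        filters:
          person:
            min_area: 5000
"""

config = yaml.safe_load(NEW_LAYOUT)
for camera, camera_config in config['cameras'].items():
    print(camera, list(camera_config.get('zones', {}).keys()))  # back ['front_steps']
```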
					
@@ -76,39 +76,6 @@ objects:
       min_score: 0.5
       threshold: 0.85
 
-zones:
-  #################
-  # Name of the zone
-  ################
-  front_steps:
-    front_door:
-      ####################
-      # For each camera, a list of x,y coordinates to define the polygon of the zone. The top
-      # left corner is 0,0. Can also be a comma separated string of all x,y coordinates combined.
-      # The same zone can exist across multiple cameras if they have overlapping FOVs.
-      # An object is determined to be in the zone based on whether or not the bottom center
-      # of it's bounding box is within the polygon. The polygon must have at least 3 points.
-      # Coordinates can be generated at https://www.image-map.net/
-      ####################
-      coordinates:
-        - 545,1077
-        - 747,939
-        - 788,805
-      ################
-      # Zone level object filters. These are applied in addition to the global and camera filters
-      # and should be more restrictive than the global and camera filters. The global and camera
-      # filters are applied upstream.
-      ################
-      filters:
-        person:
-          min_area: 5000
-          max_area: 100000
-          threshold: 0.8
-  driveway:
-    front_door:
-      coordinates: 545,1077,747,939,788,805
-  yard:
-
 cameras:
   back:
     ffmpeg:
@@ -169,6 +136,37 @@ cameras:
     #   crop_to_region: True
     #   snapshot_height: 300
 
+    ################
+    # Zones
+    ################
+    zones:
+      #################
+      # Name of the zone
+      ################
+      front_steps:
+        ####################
+        # A list of x,y coordinates to define the polygon of the zone. The top
+        # left corner is 0,0. Can also be a comma separated string of all x,y coordinates combined.
+        # The same zone name can exist across multiple cameras if they have overlapping FOVs.
+        # An object is determined to be in the zone based on whether or not the bottom center
+        # of it's bounding box is within the polygon. The polygon must have at least 3 points.
+        # Coordinates can be generated at https://www.image-map.net/
+        ####################
+        coordinates:
+          - 545,1077
+          - 747,939
+          - 788,805
+        ################
+        # Zone level object filters. These are applied in addition to the global and camera filters
+        # and should be more restrictive than the global and camera filters. The global and camera
+        # filters are applied upstream.
+        ################
+        filters:
+          person:
+            min_area: 5000
+            max_area: 100000
+            threshold: 0.8
+
     ################
     # This will save a clip for each tracked object by frigate along with a json file that contains
     # data related to the tracked object. This works by telling ffmpeg to write video segments to /cache
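The comments above define zone membership as the bottom center of an object's bounding box falling inside the polygon. A minimal sketch of that test, using the zone coordinates from the example config; the use of cv2.pointPolygonTest here is an assumption for illustration and is not part of this diff:

```python
import cv2
import numpy as np

# Zone polygon from the example config above, as an OpenCV-style contour.
contour = np.array([[545, 1077], [747, 939], [788, 805]], dtype=np.int32)

def bottom_center(box):
    # box = (x_min, y_min, x_max, y_max); the anchor point is the bottom center.
    x_min, y_min, x_max, y_max = box
    return (int((x_min + x_max) / 2), int(y_max))

def in_zone(zone_contour, box):
    # pointPolygonTest returns >= 0 when the point is inside or on the edge.
    return cv2.pointPolygonTest(zone_contour, bottom_center(box), False) >= 0

print(in_zone(contour, (650, 700, 720, 950)))  # -> True
```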
@@ -182,7 +182,7 @@ def main():
             'show_timestamp': config.get('snapshots', {}).get('show_timestamp', True),
             'draw_zones': config.get('snapshots', {}).get('draw_zones', False)
         }
-        config['zones'] = {}
+        config['zones'] = config.get('zones', {})
 
     # Queue for cameras to push tracked objects to
     tracked_objects_queue = mp.Queue()
@@ -293,7 +293,7 @@ def main():
     event_processor = EventProcessor(CONFIG['cameras'], camera_processes, '/cache', '/clips', event_queue, stop_event)
     event_processor.start()
     
-    object_processor = TrackedObjectProcessor(CONFIG['cameras'], CONFIG.get('zones', {}), client, MQTT_TOPIC_PREFIX, tracked_objects_queue, event_queue, stop_event)
+    object_processor = TrackedObjectProcessor(CONFIG['cameras'], client, MQTT_TOPIC_PREFIX, tracked_objects_queue, event_queue, stop_event)
     object_processor.start()
     
     camera_watchdog = CameraWatchdog(camera_processes, CONFIG['cameras'], tflite_process, tracked_objects_queue, plasma_process, stop_event)
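With zones nested per camera, main() simply defaults each camera's zones to an empty dict instead of building a global zone table and passing it to TrackedObjectProcessor. A rough illustration of that normalization step, assuming a plain config dict (the camera names and the stand-alone loop are hypothetical; only the config['zones'] line mirrors the diff):

```python
# Hypothetical camera section: one camera defines zones, the other omits the key.
CONFIG = {
    'cameras': {
        'back': {'zones': {'front_steps': {'coordinates': '545,1077,747,939,788,805'}}},
        'side': {},
    }
}

# Mirrors the new line in the hunk above: every camera ends up with a 'zones'
# dict, so downstream code can iterate over it without key checks.
for name, config in CONFIG['cameras'].items():
    config['zones'] = config.get('zones', {})

assert CONFIG['cameras']['side']['zones'] == {}
assert 'front_steps' in CONFIG['cameras']['back']['zones']
```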
@@ -230,10 +230,9 @@ class CameraState():
 
 
 class TrackedObjectProcessor(threading.Thread):
-    def __init__(self, camera_config, zone_config, client, topic_prefix, tracked_objects_queue, event_queue, stop_event):
+    def __init__(self, camera_config, client, topic_prefix, tracked_objects_queue, event_queue, stop_event):
         threading.Thread.__init__(self)
         self.camera_config = camera_config
-        self.zone_config = zone_config
         self.client = client
         self.topic_prefix = topic_prefix
         self.tracked_objects_queue = tracked_objects_queue
@@ -299,25 +298,24 @@ class TrackedObjectProcessor(threading.Thread):
         self.zone_data = defaultdict(lambda: defaultdict(lambda: set()))
 
         # set colors for zones
+        all_zone_names = set([zone for config in self.camera_config.values() for zone in config['zones'].keys()])
         zone_colors = {}
-        colors = plt.cm.get_cmap('tab10', len(self.zone_config.keys()))
-        for i, zone in enumerate(self.zone_config.keys()):
+        colors = plt.cm.get_cmap('tab10', len(all_zone_names))
+        for i, zone in enumerate(all_zone_names):
             zone_colors[zone] = tuple(int(round(255 * c)) for c in colors(i)[:3])
 
         # create zone contours
-        for zone_name, config in zone_config.items():
-            for camera, camera_zone_config in config.items():
-                camera_zone = {}
-                camera_zone['color'] = zone_colors[zone_name]
-                coordinates = camera_zone_config['coordinates']
+        for camera_config in self.camera_config.values():
+            for zone_name, zone_config in camera_config['zones'].items():
+                zone_config['color'] = zone_colors[zone_name]
+                coordinates = zone_config['coordinates']
                 if isinstance(coordinates, list):
-                    camera_zone['contour'] =  np.array([[int(p.split(',')[0]), int(p.split(',')[1])] for p in coordinates])
+                    zone_config['contour'] =  np.array([[int(p.split(',')[0]), int(p.split(',')[1])] for p in coordinates])
                 elif isinstance(coordinates, str):
                     points = coordinates.split(',')
-                    camera_zone['contour'] =  np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
+                    zone_config['contour'] =  np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
                 else:
                     print(f"Unable to parse zone coordinates for {zone_name} - {camera}")
-                self.camera_config[camera]['zones'][zone_name] = camera_zone
         
     def get_best(self, camera, label):
         best_objects = self.camera_states[camera].best_objects
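The branches above accept zone coordinates either as a YAML list of "x,y" strings or as one comma-separated string, and both should yield the same contour. A small standalone check of that parsing logic (the parse_coordinates helper is pulled out here for illustration; it is not a function in the codebase):

```python
import numpy as np

def parse_coordinates(coordinates):
    # Same two branches as the loop above, extracted as a helper.
    if isinstance(coordinates, list):
        return np.array([[int(p.split(',')[0]), int(p.split(',')[1])] for p in coordinates])
    if isinstance(coordinates, str):
        points = coordinates.split(',')
        return np.array([[int(points[i]), int(points[i + 1])] for i in range(0, len(points), 2)])
    raise ValueError(f"Unable to parse zone coordinates: {coordinates!r}")

as_list = parse_coordinates(['545,1077', '747,939', '788,805'])
as_string = parse_coordinates('545,1077,747,939,788,805')
assert (as_list == as_string).all()  # both formats produce the same 3x2 contour
```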
|  | ||||
		Loading…
	
		Reference in New Issue
	
	Block a user