From e0b9b616ce86b037d1d80339bd07cc1ff8cdca70 Mon Sep 17 00:00:00 2001 From: blakeblackshear Date: Sat, 30 Mar 2019 07:58:31 -0500 Subject: [PATCH] cleanup and update readme --- Dockerfile | 7 ++- README.md | 114 +++++++++-------------------------- config/config.yml | 32 ++++++++-- config/mask-0-300.bmp | Bin 245056 -> 0 bytes config/mask-350-250.bmp | Bin 320056 -> 0 bytes config/mask-750-250.bmp | Bin 320056 -> 0 bytes detect_objects.py | 27 ++------- frigate/motion.py | 116 ------------------------------------ frigate/mqtt.py | 23 ------- frigate/object_detection.py | 8 +-- frigate/objects.py | 20 ++----- frigate/video.py | 5 +- 12 files changed, 74 insertions(+), 278 deletions(-) delete mode 100644 config/mask-0-300.bmp delete mode 100644 config/mask-350-250.bmp delete mode 100644 config/mask-750-250.bmp delete mode 100644 frigate/motion.py diff --git a/Dockerfile b/Dockerfile index 7dd0026f2..b2f6e7edf 100644 --- a/Dockerfile +++ b/Dockerfile @@ -92,6 +92,10 @@ RUN tar xzf edgetpu_api.tar.gz \ RUN (apt-get autoremove -y; \ apt-get autoclean -y) +# symlink the model and labels +RUN ln -s /python-tflite-source/edgetpu/test_data/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite /frozen_inference_graph.pb +RUN ln -s /python-tflite-source/edgetpu/test_data/coco_labels.txt /label_map.pbtext + # Set TF object detection available ENV PYTHONPATH "$PYTHONPATH:/usr/local/lib/python3.5/dist-packages/tensorflow/models/research:/usr/local/lib/python3.5/dist-packages/tensorflow/models/research/slim" RUN cd /usr/local/lib/python3.5/dist-packages/tensorflow/models/research && protoc object_detection/protos/*.proto --python_out=. @@ -101,6 +105,3 @@ ADD frigate frigate/ COPY detect_objects.py . CMD ["python3", "-u", "detect_objects.py"] - -# WORKDIR /python-tflite-source/edgetpu/ -# CMD ["python3", "-u", "demo/classify_image.py", "--model", "test_data/mobilenet_v2_1.0_224_inat_bird_quant_edgetpu.tflite", "--label", "test_data/inat_bird_labels.txt", "--image", "test_data/parrot.jpg"] \ No newline at end of file diff --git a/README.md b/README.md index 255865ea7..933a6ccfa 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,18 @@ # Frigate - Realtime Object Detection for RTSP Cameras +**Note:** This version requires the use of a [Google Coral USB Accelerator](https://coral.withgoogle.com/products/accelerator/) + Uses OpenCV and Tensorflow to perform realtime object detection locally for RTSP cameras. Designed for integration with HomeAssistant or others via MQTT. 
- Leverages multiprocessing and threads heavily with an emphasis on realtime over processing every frame -- Allows you to define specific regions (squares) in the image to look for motion/objects -- Motion detection runs in a separate process per region and signals to object detection to avoid wasting CPU cycles looking for objects when there is no motion -- Object detection with Tensorflow runs in a separate process per region -- Detected objects are placed on a shared mp.Queue and aggregated into a list of recently detected objects in a separate thread -- A person score is calculated as the sum of all scores/5 -- Motion and object info is published over MQTT for integration into HomeAssistant or others +- Allows you to define specific regions (squares) in the image to look for objects +- No motion detection (for now) +- Object detection with Tensorflow runs in a separate thread +- Object info is published over MQTT for integration into HomeAssistant as a binary sensor - An endpoint is available to view an MJPEG stream for debugging ![Diagram](diagram.png) -## Example video +## Example video (from older version) You see multiple bounding boxes because it draws bounding boxes from all frames in the past 1 second where a person was detected. Not all of the bounding boxes were from the current frame. [![](http://img.youtube.com/vi/nqHbCtyo4dY/0.jpg)](http://www.youtube.com/watch?v=nqHbCtyo4dY "Frigate") @@ -22,24 +22,16 @@ Build the container with docker build -t frigate . ``` -Download a model from the [zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md). - -Download the cooresponding label map from [here](https://github.com/tensorflow/models/tree/master/research/object_detection/data). +The `mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite` model is included and used by default. You can use your own model and labels by mounting files in the container at `/frozen_inference_graph.pb` and `/label_map.pbtext`. Models must be compatible with the Coral according to [this](https://coral.withgoogle.com/models/). Run the container with ``` docker run --rm \ --v :/frozen_inference_graph.pb:ro \ --v :/label_map.pbtext:ro \ +--privileged \ +-v /dev/bus/usb:/dev/bus/usb \ -v :/config:ro \ -p 5000:5000 \ --e RTSP_URL='' \ --e REGIONS=',,,,,:,,,,,' \ --e MQTT_HOST='your.mqtthost.com' \ --e MQTT_USER='username' \ --e MQTT_PASS='password' \ --e MQTT_TOPIC_PREFIX='cameras/1' \ --e DEBUG='0' \ +-e RTSP_PASSWORD='password' \ frigate:latest ``` @@ -48,107 +40,59 @@ Example docker-compose: frigate: container_name: frigate restart: unless-stopped + privileged: true image: frigate:latest volumes: - - :/frozen_inference_graph.pb:ro - - :/label_map.pbtext:ro + - /dev/bus/usb:/dev/bus/usb - :/config ports: - - "127.0.0.1:5000:5000" + - "5000:5000" environment: - RTSP_URL: "" - REGIONS: ",,,,,:,,,,," - MQTT_HOST: "your.mqtthost.com" - MQTT_USER: "username" #optional - MQTT_PASS: "password" #optional - MQTT_TOPIC_PREFIX: "cameras/1" - DEBUG: "0" + RTSP_PASSWORD: "password" ``` -Here is an example `REGIONS` env variable: -`350,0,300,5000,200,mask-0-300.bmp:400,350,250,2000,200,mask-350-250.bmp:400,750,250,2000,200,mask-750-250.bmp` +A `config.yml` file must exist in the `config` directory. See example [here](config/config.yml). 
-First region broken down (all are required):
-- `350` - size of the square (350px by 350px)
-- `0` - x coordinate of upper left corner (top left of image is 0,0)
-- `300` - y coordinate of upper left corner (top left of image is 0,0)
-- `5000` - minimum person bounding box size (width*height for bounding box of identified person)
-- `200` - minimum number of changed pixels to trigger motion
-- `mask-0-300.bmp` - a bmp file with the masked regions as pure black, must be the same size as the region
-
-Mask files go in the `/config` directory.
-
-Access the mjpeg stream at http://localhost:5000
+Access the mjpeg stream at `http://localhost:5000/<camera_name>` and the best person snapshot at `http://localhost:5000/<camera_name>/best_person.jpg`
 
 ## Integration with HomeAssistant
 ```
 camera:
   - name: Camera Last Person
     platform: generic
-    still_image_url: http://<ip>:5000/best_person.jpg
-
-binary_sensor:
-  - name: Camera Motion
-    platform: mqtt
-    state_topic: "cameras/1/motion"
-    device_class: motion
-    availability_topic: "cameras/1/available"
+    still_image_url: http://<ip>:5000/<camera_name>/best_person.jpg
 
 sensor:
-  - name: Camera Person Score
+  - name: Camera Person
     platform: mqtt
-    state_topic: "cameras/1/objects"
+    state_topic: "frigate/<camera_name>/objects"
     value_template: '{{ value_json.person }}'
-    unit_of_measurement: '%'
-    availability_topic: "cameras/1/available"
+    device_class: moving
+    availability_topic: "frigate/available"
 ```
 
 ## Tips
 - Lower the framerate of the RTSP feed on the camera to reduce the CPU usage for capturing the feed
-- Use SSDLite models to reduce CPU usage
 
 ## Future improvements
 - [x] Remove motion detection for now
-- [ ] Try running object detection in a thread rather than a process
+- [x] Try running object detection in a thread rather than a process
 - [x] Implement min person size again
-- [ ] Switch to a config file
-- [ ] Handle multiple cameras in the same container
-- [ ] Simplify motion detection (check entire image against mask)
+- [x] Switch to a config file
+- [x] Handle multiple cameras in the same container
+- [ ] Attempt to figure out coral symlinking
+- [ ] Add object list to config with min scores for mqtt
+- [ ] Move mjpeg encoding to a separate process
+- [ ] Simplify motion detection (check entire image against mask, resize instead of gaussian blur)
 - [ ] See if motion detection is even worth running
 - [ ] Scan for people across entire image rather than specific regions
 - [ ] Dynamically resize detection area and follow people
 - [ ] Add ability to turn detection on and off via MQTT
-- [ ] MQTT motion occasionally gets stuck ON
 - [ ] Output movie clips of people for notifications, etc.
 - [ ] Integrate with homeassistant push camera
 - [ ] Merge bounding boxes that span multiple regions
-- [ ] Allow motion regions to be different than object detection regions
 - [ ] Implement mode to save labeled objects for training
 - [ ] Try and reduce CPU usage by simplifying the tensorflow model to just include the objects we care about
 - [ ] Look into GPU accelerated decoding of RTSP stream
 - [ ] Send video over a socket and use JSMPEG
 - [x] Look into neural compute stick
-
-## Building Tensorflow from source for CPU optimizations
-https://www.tensorflow.org/install/source#docker_linux_builds
-used `tensorflow/tensorflow:1.12.0-devel-py3`
-
-## Optimizing the graph (cant say I saw much difference in CPU usage)
-https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/graph_transforms/README.md#optimizing-for-deployment
-```
-docker run -it -v ${PWD}:/lab -v ${PWD}/../back_camera_model/models/ssd_mobilenet_v2_coco_2018_03_29/frozen_inference_graph.pb:/frozen_inference_graph.pb:ro tensorflow/tensorflow:1.12.0-devel-py3 bash
-
-bazel build tensorflow/tools/graph_transforms:transform_graph
-
-bazel-bin/tensorflow/tools/graph_transforms/transform_graph \
---in_graph=/frozen_inference_graph.pb \
---out_graph=/lab/optimized_inception_graph.pb \
---inputs='image_tensor' \
---outputs='num_detections,detection_scores,detection_boxes,detection_classes' \
---transforms='
-  strip_unused_nodes(type=float, shape="1,300,300,3")
-  remove_nodes(op=Identity, op=CheckNumerics)
-  fold_constants(ignore_errors=true)
-  fold_batch_norms
-  fold_old_batch_norms'
-```
\ No newline at end of file
diff --git a/config/config.yml b/config/config.yml
index baa897662..53ee9d20b 100644
--- a/config/config.yml
+++ b/config/config.yml
@@ -1,8 +1,8 @@
 web_port: 5000
 
 mqtt:
-  host: mqtt.blakeshome.com
-  topic_prefix: cameras
+  host: mqtt.server.com
+  topic_prefix: frigate
 
 cameras:
   back:
@@ -10,18 +10,40 @@ cameras:
       user: viewer
       host: 10.0.10.10
      port: 554
+      # values that begin with a "$" will be replaced with the corresponding environment variable
       password: $RTSP_PASSWORD
       path: /cam/realmonitor?channel=1&subtype=2
     regions:
       - size: 350
         x_offset: 0
         y_offset: 300
-        min_person_size: 5000
+        min_person_area: 5000
       - size: 400
         x_offset: 350
         y_offset: 250
-        min_person_size: 2000
+        min_person_area: 2000
       - size: 400
         x_offset: 750
         y_offset: 250
-        min_person_size: 2000
\ No newline at end of file
+        min_person_area: 2000
+  back2:
+    rtsp:
+      user: viewer
+      host: 10.0.10.10
+      port: 554
+      # values that begin with a "$" will be replaced with the corresponding environment variable
+      password: $RTSP_PASSWORD
+      path: /cam/realmonitor?channel=1&subtype=2
+    regions:
+      - size: 350
+        x_offset: 0
+        y_offset: 300
+        min_person_area: 5000
+      - size: 400
+        x_offset: 350
+        y_offset: 250
+        min_person_area: 2000
+      - size: 400
+        x_offset: 750
+        y_offset: 250
+        min_person_area: 2000
\ No newline at end of file
diff --git a/config/mask-0-300.bmp b/config/mask-0-300.bmp
deleted file mode 100644
index 8a80c196f69459ea683dac5021eaa8e4189ca8b4..0000000000000000000000000000000000000000
GIT binary patch
[base85-encoded binary data for the deleted mask .bmp files omitted]
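For context on the `config.yml` hunk above, here is a minimal sketch of how a consumer such as `detect_objects.py` might load it. The actual parsing code is not part of the text shown here, so the PyYAML usage, the `$`-prefixed environment-variable substitution, and the RTSP URL construction below are assumptions:

```
import os
import yaml

with open('/config/config.yml') as f:
    config = yaml.safe_load(f)

mqtt_host = config['mqtt']['host']
mqtt_topic_prefix = config['mqtt']['topic_prefix']

for name, camera in config['cameras'].items():
    rtsp = camera['rtsp']
    # values that begin with "$" are resolved from the environment (e.g. $RTSP_PASSWORD)
    password = rtsp['password']
    if isinstance(password, str) and password.startswith('$'):
        password = os.getenv(password[1:], '')
    rtsp_url = 'rtsp://{}:{}@{}:{}{}'.format(
        rtsp['user'], password, rtsp['host'], rtsp['port'], rtsp['path'])
    for region in camera['regions']:
        # each region is a square area of the frame that is scanned for objects
        print(name, rtsp_url, region['size'], region['x_offset'],
              region['y_offset'], region['min_person_area'])
```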
z^S*v_QEI;2?wEJ2;tp;+*BMVi$1~8a>SgWmF#3YaZQ})=ixcxCw_{rt7mAfpJ!y^8 zhyl+T*5i-s@gerO@7e*dNtvM`5K@`SE>|W_yJ2gYfnn0%#``(CNJ zZkc(<%SPBl*_(~m{54`5N0|)MC`cqaI#rW@SqrQp(N6W$ojx&?HY8)w9~y zr1Z_^56FYpYAN~Sq&N4IA`Y~7Z_BM})GV_0>gHbgt8Vq+<1M3|68T9szPWnPszxas zy<+iv-uCvUf4uZ2e`40Ak22-1k)@PquSA}?Iq7Act@G8dMWry@8`ZbBH+^-XZ9PKd zm;F-YmyugWUKvlter_*VjK4`*v*SHCL%(0TtUaB6uFY!osOSEAGko(9ib|nhDpu|q z*C$b)eEiH^_r0{!J#JOstM6*HYY^qgRyY_ayL7QX);&dOFt*GhYgau#>4*Rl^nA%Aks+ZTle^Vo_`Cu7}?!8n=I2O0pc1= z<{MmZdYU~y>u(W=JJB&V=CXFz$mY45&T~cTPE}7 z-8JqSqxxm7RjnQrE4St0-u$`4-V)l6+BZts>cRS+qKcf8I5wjz}<{M`HD_O1;?Zwr5*5-eXtprvT z=$EqgNH)W4Rs+gLYRC7eCrwJ&aXLu4$4M(2A3R%f{yHl1Z6S&=l%x7MeS0dMt)ac` zGwn^_EduS`>cOZ!sV`pEV(EN~I#O0Mx-GAg0$vZv>N5zw{u%ZlkY|B@sdlvdn9@9- zxv&#hL}0i#NS3|BzsT|JLm=M*#mcfX2Izm-q7)egcaLEZ2v%*s<+JAnyXw zN8YcX@V}pc`qlf{I&%I6+PjUj*>=UE>-j%(XF%X}fmT)j?(0hS_?iOl4}Q(-?yMIR lSpGj$E_Qr-5y-E=D*xd)zh^Dv1QrqKmy*` 0 and (now - last_motion) > 5: - last_motion = -1 - if motion_detected.is_set(): - motion_detected.clear() - with motion_changed: - motion_changed.notify_all() - - - with frame_ready: - # if there isnt a frame ready for processing or it is old, wait for a signal - if shared_frame_time.value == frame_time or (now - shared_frame_time.value) > 0.5: - frame_ready.wait() - - # lock and make a copy of the cropped frame - with frame_lock: - cropped_frame = arr[region_y_offset:region_y_offset+region_size, region_x_offset:region_x_offset+region_size].copy() - frame_time = shared_frame_time.value - - # convert to grayscale - gray = cv2.cvtColor(cropped_frame, cv2.COLOR_BGR2GRAY) - - # apply image mask to remove areas from motion detection - gray[mask] = [255] - - # apply gaussian blur - gray = cv2.GaussianBlur(gray, (21, 21), 0) - - if avg_frame is None: - avg_frame = gray.copy().astype("float") - continue - - # look at the delta from the avg_frame - frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg_frame)) - - if avg_delta is None: - avg_delta = frameDelta.copy().astype("float") - - # compute the average delta over the past few frames - # the alpha value can be modified to configure how sensitive the motion detection is. - # higher values mean the current frame impacts the delta a lot, and a single raindrop may - # register as motion, too low and a fast moving person wont be detected as motion - # this also assumes that a person is in the same location across more than a single frame - cv2.accumulateWeighted(frameDelta, avg_delta, 0.2) - - # compute the threshold image for the current frame - current_thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1] - - # black out everything in the avg_delta where there isnt motion in the current frame - avg_delta_image = cv2.convertScaleAbs(avg_delta) - avg_delta_image[np.where(current_thresh==[0])] = [0] - - # then look for deltas above the threshold, but only in areas where there is a delta - # in the current frame. 
this prevents deltas from previous frames from being included - thresh = cv2.threshold(avg_delta_image, 25, 255, cv2.THRESH_BINARY)[1] - - # dilate the thresholded image to fill in holes, then find contours - # on thresholded image - thresh = cv2.dilate(thresh, None, iterations=2) - cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) - cnts = imutils.grab_contours(cnts) - - motion_found = False - - # loop over the contours - for c in cnts: - # if the contour is big enough, count it as motion - contour_area = cv2.contourArea(c) - if contour_area > min_motion_area: - motion_found = True - if debug: - cv2.drawContours(cropped_frame, [c], -1, (0, 255, 0), 2) - x, y, w, h = cv2.boundingRect(c) - cv2.putText(cropped_frame, str(contour_area), (x, y), - cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 100, 0), 2) - else: - break - - if motion_found: - motion_frames += 1 - # if there have been enough consecutive motion frames, report motion - if motion_frames >= 3: - # only average in the current frame if the difference persists for at least 3 frames - cv2.accumulateWeighted(gray, avg_frame, 0.01) - motion_detected.set() - with motion_changed: - motion_changed.notify_all() - last_motion = now - else: - # when no motion, just keep averaging the frames together - cv2.accumulateWeighted(gray, avg_frame, 0.01) - motion_frames = 0 - - if debug and motion_frames == 3: - cv2.imwrite("/lab/debug/motion-{}-{}-{}.jpg".format(region_x_offset, region_y_offset, datetime.datetime.now().timestamp()), cropped_frame) - cv2.imwrite("/lab/debug/avg_delta-{}-{}-{}.jpg".format(region_x_offset, region_y_offset, datetime.datetime.now().timestamp()), avg_delta_image) diff --git a/frigate/mqtt.py b/frigate/mqtt.py index fbd401776..0a7bd6de7 100644 --- a/frigate/mqtt.py +++ b/frigate/mqtt.py @@ -1,29 +1,6 @@ import json import threading -class MqttMotionPublisher(threading.Thread): - def __init__(self, client, topic_prefix, motion_changed, motion_flags): - threading.Thread.__init__(self) - self.client = client - self.topic_prefix = topic_prefix - self.motion_changed = motion_changed - self.motion_flags = motion_flags - - def run(self): - last_sent_motion = "" - while True: - with self.motion_changed: - self.motion_changed.wait() - - # send message for motion - motion_status = 'OFF' - if any(obj.is_set() for obj in self.motion_flags): - motion_status = 'ON' - - if last_sent_motion != motion_status: - last_sent_motion = motion_status - self.client.publish(self.topic_prefix+'/motion', motion_status, retain=False) - class MqttObjectPublisher(threading.Thread): def __init__(self, client, topic_prefix, objects_parsed, detected_objects): threading.Thread.__init__(self) diff --git a/frigate/object_detection.py b/frigate/object_detection.py index f845920a6..76050f1ec 100644 --- a/frigate/object_detection.py +++ b/frigate/object_detection.py @@ -36,13 +36,10 @@ class PreppedQueueProcessor(threading.Thread): # process queue... while True: frame = self.prepped_frame_queue.get() - # print(self.prepped_frame_queue.qsize()) + # Actual detection. 
objects = self.engine.DetectWithInputTensor(frame['frame'], threshold=0.5, top_k=3) - # time.sleep(0.1) - # objects = [] - # print(self.engine.get_inference_time()) - # put detected objects in the queue + # parse and pass detected objects back to the camera parsed_objects = [] for obj in objects: box = obj.bounding_box.flatten().tolist() @@ -99,7 +96,6 @@ class FramePrepper(threading.Thread): # Expand dimensions since the model expects images to have shape: [1, 300, 300, 3] frame_expanded = np.expand_dims(cropped_frame_rgb, axis=0) - # print("Prepped frame at " + str(self.region_x_offset) + "," + str(self.region_y_offset)) # add the frame to the queue if not self.prepped_frame_queue.full(): self.prepped_frame_queue.put({ diff --git a/frigate/objects.py b/frigate/objects.py index 66672cf1d..5c5a2e8ac 100644 --- a/frigate/objects.py +++ b/frigate/objects.py @@ -3,18 +3,6 @@ import datetime import threading import cv2 from object_detection.utils import visualization_utils as vis_util -class ObjectParser(threading.Thread): - def __init__(self, cameras, object_queue, detected_objects, regions): - threading.Thread.__init__(self) - self.cameras = cameras - self.object_queue = object_queue - self.regions = regions - - def run(self): - # frame_times = {} - while True: - obj = self.object_queue.get() - self.cameras[obj['camera_name']].add_object(obj) class ObjectCleaner(threading.Thread): def __init__(self, objects_parsed, detected_objects): @@ -34,7 +22,6 @@ class ObjectCleaner(threading.Thread): # (newest objects are appended to the end) detected_objects = self._detected_objects.copy() - #print([round(now-obj['frame_time'],2) for obj in detected_objects]) num_to_delete = 0 for obj in detected_objects: if now-obj['frame_time']<2: @@ -69,8 +56,6 @@ class BestPersonFrame(threading.Thread): # make a copy of detected objects detected_objects = self.detected_objects.copy() detected_people = [obj for obj in detected_objects if obj['name'] == 'person'] - # make a copy of the recent frames - recent_frames = self.recent_frames.copy() # get the highest scoring person new_best_person = max(detected_people, key=lambda x:x['score'], default=self.best_person) @@ -89,7 +74,10 @@ class BestPersonFrame(threading.Thread): # or the current person is more than 1 minute old, use the new best person if new_best_person['score'] > self.best_person['score'] or (now - self.best_person['frame_time']) > 60: self.best_person = new_best_person - + + # make a copy of the recent frames + recent_frames = self.recent_frames.copy() + if not self.best_person is None and self.best_person['frame_time'] in recent_frames: best_frame = recent_frames[self.best_person['frame_time']] best_frame = cv2.cvtColor(best_frame, cv2.COLOR_BGR2RGB) diff --git a/frigate/video.py b/frigate/video.py index 903064cc1..41b77f9ba 100644 --- a/frigate/video.py +++ b/frigate/video.py @@ -8,11 +8,10 @@ import multiprocessing as mp from object_detection.utils import visualization_utils as vis_util from . util import tonumpyarray from . object_detection import FramePrepper -from . objects import ObjectCleaner, ObjectParser, BestPersonFrame +from . objects import ObjectCleaner, BestPersonFrame from . 
mqtt import MqttObjectPublisher
 
-# fetch the frames as fast a possible, only decoding the frames when the
-# detection_process has consumed the current frame
+# fetch the frames as fast as possible and store the current frame in a shared memory array
 def fetch_frames(shared_arr, shared_frame_time, frame_lock, frame_ready, frame_shape, rtsp_url):
     # convert shared memory array into numpy and shape into image array
     arr = tonumpyarray(shared_arr).reshape(frame_shape)
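For reference, a minimal sketch of the shared-memory handoff that `fetch_frames` builds on. The `tonumpyarray` helper lives in `frigate/util.py` and is not shown in this diff, so the `np.frombuffer` view and the hypothetical `store_frame` writer below are assumptions about how the pattern works:

```
import ctypes
import multiprocessing as mp
import numpy as np

frame_shape = (1080, 1920, 3)  # example resolution; the real shape comes from the camera stream

# one shared buffer holds the latest decoded BGR frame, plus a shared timestamp and sync primitives
shared_arr = mp.Array(ctypes.c_uint8, frame_shape[0] * frame_shape[1] * frame_shape[2])
shared_frame_time = mp.Value('d', 0.0)
frame_lock = mp.Lock()
frame_ready = mp.Condition()

def tonumpyarray(mp_arr):
    # zero-copy numpy view over the shared memory block
    return np.frombuffer(mp_arr.get_obj(), dtype=np.uint8)

def store_frame(new_frame, frame_time):
    # the capture process overwrites the shared frame and wakes any waiting consumers
    arr = tonumpyarray(shared_arr).reshape(frame_shape)
    with frame_lock:
        arr[:] = new_frame
        shared_frame_time.value = frame_time
    with frame_ready:
        frame_ready.notify_all()
```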