How to convert RasPi Python code to Jetson Nano compatible Python code

I am trying to build a stereo vision camera on a Jetson Nano with two RasPi cameras. However, I can find plenty of information and code online for the RasPi, but not for the Jetson Nano. For example, say I have these two Python programs: the first starts both cameras on the Jetson Nano, and the second starts both cameras on the RasPi. I am new to all of this, so any advice on how to get started would be great. Thanks!

Jetson (taken from JetsonHacks):

# MIT License
# Copyright (c) 2019, 2020 JetsonHacks
# See license
# A very simple code snippet
# Using two CSI cameras (such as the Raspberry Pi Version 2) connected to a
# NVIDIA Jetson Nano Developer Kit (Rev B01) using OpenCV
# Drivers for the camera and OpenCV are included in the base image in JetPack 4.3+

# This script will open a window and place the camera stream from each camera in a window
# arranged horizontally.
# The camera streams are each read in their own thread, as when done sequentially there
# is a noticeable lag
# For better performance, the next step would be to experiment with having the window display
# in a separate thread

import cv2
import threading
import numpy as np

# gstreamer_pipeline returns a GStreamer pipeline for capturing from the CSI camera
# Flip the image by setting the flip_method (most common values: 0 and 2)
# display_width and display_height determine the size of each camera pane in the window on the screen

left_camera = None
right_camera = None


class CSI_Camera:

    def __init__(self):
        # Initialize instance variables
        # OpenCV video capture element
        self.video_capture = None
        # The last captured image from the camera
        self.frame = None
        self.grabbed = False
        # The thread where the video capture runs
        self.read_thread = None
        self.read_lock = threading.Lock()
        self.running = False


    def open(self, gstreamer_pipeline_string):
        try:
            self.video_capture = cv2.VideoCapture(
                gstreamer_pipeline_string, cv2.CAP_GSTREAMER
            )

        except RuntimeError:
            self.video_capture = None
            print("Unable to open camera")
            print("Pipeline: " + gstreamer_pipeline_string)
            return
        # Grab the first frame to start the video capturing
        self.grabbed, self.frame = self.video_capture.read()

    def start(self):
        if self.running:
            print('Video capturing is already running')
            return None
        # create a thread to read the camera image
        if self.video_capture is not None:
            self.running = True
            self.read_thread = threading.Thread(target=self.updateCamera)
            self.read_thread.start()
        return self

    def stop(self):
        self.running = False
        self.read_thread.join()

    def updateCamera(self):
        # This is the thread to read images from the camera
        while self.running:
            try:
                grabbed, frame = self.video_capture.read()
                with self.read_lock:
                    self.grabbed = grabbed
                    self.frame = frame
            except RuntimeError:
                print("Could not read image from camera")
        # FIX ME - stop and cleanup thread
        # Something bad happened
        

    def read(self):
        with self.read_lock:
            frame = self.frame.copy()
            grabbed = self.grabbed
        return grabbed, frame

    def release(self):
        if self.video_capture is not None:
            self.video_capture.release()
            self.video_capture = None
        # Now kill the thread
        if self.read_thread is not None:
            self.read_thread.join()


# Currently the frame rate on the CSI camera on the Nano is set through the gstreamer pipeline
# Here we directly select sensor_mode 3 (1280x720, 59.9999 fps)
def gstreamer_pipeline(
    sensor_id=0, sensor_mode=3, capture_width=1280, capture_height=720,
    display_width=1280, display_height=720, framerate=30, flip_method=0,
):
    return (
        "nvarguscamerasrc sensor-id=%d sensor-mode=%d ! "
        "video/x-raw(memory:NVMM), "
        "width=(int)%d, height=(int)%d, "
        "format=(string)NV12, framerate=(fraction)%d/1 ! "
        "nvvidconv flip-method=%d ! "
        "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
        "videoconvert ! "
        "video/x-raw, format=(string)BGR ! appsink"
        % (
            sensor_id, sensor_mode, capture_width, capture_height,
            framerate, flip_method, display_width, display_height,
        )
    )


def start_cameras():
    left_camera = CSI_Camera()
    left_camera.open(
        gstreamer_pipeline(sensor_id=0, display_height=540, display_width=960)
    )
    left_camera.start()

    # Use the same display size for both cameras so np.hstack() can join the frames
    right_camera = CSI_Camera()
    right_camera.open(
        gstreamer_pipeline(sensor_id=1, display_height=540, display_width=960)
    )
    right_camera.start()

    cv2.namedWindow("CSI Cameras", cv2.WINDOW_AUTOSIZE)

    if (
        not left_camera.video_capture.isOpened()
        or not right_camera.video_capture.isOpened()
    ):
        # Cameras did not open, or no camera attached
        print("Unable to open any cameras")
        # Todo: Proper Cleanup
        raise SystemExit(0)

    while cv2.getWindowProperty("CSI Cameras", 0) >= 0:

        _, left_image = left_camera.read()
        _, right_image = right_camera.read()
        camera_images = np.hstack((left_image, right_image))
        cv2.imshow("CSI Cameras", camera_images)

        # This also acts as the per-frame delay for the display loop
        keyCode = cv2.waitKey(30) & 0xFF
        # Stop the program on the ESC key
        if keyCode == 27:
            break

    left_camera.stop()
    left_camera.release()
    right_camera.stop()
    right_camera.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    start_cameras()
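
To sanity-check a single CSI camera before wiring up both, I think a minimal snippet like the one below should work; it reuses the gstreamer_pipeline() helper from the listing above and assumes the JetPack OpenCV build with GStreamer support (the parameter values are just examples):

# Minimal single-camera check, reusing gstreamer_pipeline() from the listing above
# (assumes OpenCV built with GStreamer support, as in the JetPack base image)
import cv2

pipeline = gstreamer_pipeline(sensor_id=0, flip_method=0)
print(pipeline)  # inspect the pipeline string handed to GStreamer
video_capture = cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)
if not video_capture.isOpened():
    raise SystemExit("Unable to open CSI camera 0")
grabbed, frame = video_capture.read()
print("Frame grabbed:", grabbed, "shape:", None if frame is None else frame.shape)
video_capture.release()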

RasPi (from https://github.com/realizator/stereopi-tutorial/blob/master/1_test.py):

# Copyright (C) 2019 Eugene Pomazov, <stereopi.com>, virt2real team
#
# This file is part of StereoPi tutorial scripts.
#
# StereoPi tutorial is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# StereoPi tutorial is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with StereoPi tutorial.
# If not, see <http://www.gnu.org/licenses/>.
#
# Most of this code is updated version of 3dBerry.org project by virt2real
# 
# Thanks to Adrian and http://pyimagesearch.com, as a lot of the
# code in this tutorial was taken from his lessons.
# 


import picamera
from picamera import PiCamera
import time
import cv2
import numpy as np
import os
from datetime import datetime


# File for captured image
filename = './scenes/photo.png'

# Camera settings
cam_width = 1280
cam_height = 480

# Final image capture settings
scale_ratio = 0.5

# Camera resolution height must be divisible by 16, and width by 32
cam_width = int((cam_width+31)/32)*32
cam_height = int((cam_height+15)/16)*16
print ("Used camera resolution: "+str(cam_width)+" x "+str(cam_height))

# Buffer for captured image settings
img_width = int(cam_width * scale_ratio)
img_height = int(cam_height * scale_ratio)
capture = np.zeros((img_height, img_width, 4), dtype=np.uint8)
print ("Scaled image resolution: "+str(img_width)+" x "+str(img_height))

# Initialize the camera
camera = PiCamera(stereo_mode='side-by-side', stereo_decimate=False)
camera.resolution = (cam_width, cam_height)
camera.framerate = 20
camera.hflip = True


t2 = datetime.now()
counter = 0
avgtime = 0
# Capture frames from the camera
for frame in camera.capture_continuous(capture, format="bgra", use_video_port=True, resize=(img_width, img_height)):
    counter += 1
    t1 = datetime.now()
    timediff = t1-t2
    avgtime = avgtime + (timediff.total_seconds())
    cv2.imshow("pair", frame)
    key = cv2.waitKey(1) & 0xFF
    t2 = datetime.now()
    # if the `q` key was pressed, break from the loop and save last image
    if key == ord("q"):
        avgtime = avgtime/counter
        print ("Average time between frames: " + str(avgtime))
        print ("Average FPS: " + str(1/avgtime))
        if not os.path.isdir("./scenes"):
            os.makedirs("./scenes")
        cv2.imwrite(filename, frame)
        break
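
From what I understand so far, the main porting step is that the picamera side-by-side stereo mode has no direct Nano equivalent, so the two CSI sensors have to be read separately and joined into one frame manually. Below is a rough, untested sketch of what I think the StereoPi loop above might look like on the Nano, reusing the CSI_Camera class and gstreamer_pipeline() helper from the JetsonHacks listing; the sensor IDs, display size, window name, and output path are only assumptions:

# Rough, untested sketch of the StereoPi capture loop ported to the Jetson Nano.
# Reuses CSI_Camera and gstreamer_pipeline() from the JetsonHacks listing above;
# sensor IDs, display size, window name and output path are assumptions.
import os
import cv2
import numpy as np

left_camera = CSI_Camera()
left_camera.open(gstreamer_pipeline(sensor_id=0, display_width=640, display_height=480))
left_camera.start()

right_camera = CSI_Camera()
right_camera.open(gstreamer_pipeline(sensor_id=1, display_width=640, display_height=480))
right_camera.start()

while True:
    _, left_frame = left_camera.read()
    _, right_frame = right_camera.read()
    # The StereoPi delivers one side-by-side frame; on the Nano the same
    # layout is rebuilt from the two separate streams with np.hstack()
    pair = np.hstack((left_frame, right_frame))
    cv2.imshow("pair", pair)
    # press `q` to save the last pair and quit, as in the StereoPi script
    if cv2.waitKey(1) & 0xFF == ord("q"):
        if not os.path.isdir("./scenes"):
            os.makedirs("./scenes")
        cv2.imwrite("./scenes/photo.png", pair)
        break

left_camera.stop()
left_camera.release()
right_camera.stop()
right_camera.release()
cv2.destroyAllWindows()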
