openpilot v0.9.6 release

date: 2024-01-12T10:13:37
master commit: ba792d576a49a0899b88a753fa1c52956bedf9e6
This commit is contained in:
FrogAi
2024-01-12 22:39:28 -07:00
commit 08e9fb1edc
1881 changed files with 653708 additions and 0 deletions

View File

@@ -0,0 +1,110 @@
import asyncio
import io
from typing import Optional, List, Tuple
import aiortc
import av
import numpy as np
import pyaudio
class AudioInputStreamTrack(aiortc.mediastreams.AudioStreamTrack):
  """aiortc audio track that captures live microphone audio with pyaudio.

  Each call to recv() reads one packet_time-sized chunk from the input
  device and wraps it in an av.AudioFrame with a monotonically increasing
  pts (counted in samples).
  """

  PYAUDIO_TO_AV_FORMAT_MAP = {
    pyaudio.paUInt8: 'u8',
    pyaudio.paInt16: 's16',
    pyaudio.paInt24: 's24',
    pyaudio.paInt32: 's32',
    pyaudio.paFloat32: 'flt',
  }
  # numpy dtypes matching the pyaudio sample formats above. paInt24 has no
  # numpy equivalent, so recv() falls back to int16 for it (the previous
  # behavior for every format).
  PYAUDIO_TO_NP_DTYPE_MAP = {
    pyaudio.paUInt8: np.uint8,
    pyaudio.paInt16: np.int16,
    pyaudio.paInt32: np.int32,
    pyaudio.paFloat32: np.float32,
  }

  def __init__(self, audio_format: int = pyaudio.paInt16, rate: int = 16000, channels: int = 1, packet_time: float = 0.020, device_index: Optional[int] = None):
    """Open a pyaudio input stream.

    Args:
      audio_format: pyaudio sample format constant (e.g. pyaudio.paInt16).
      rate: sample rate in Hz.
      channels: number of input channels.
      packet_time: duration of each recv() packet, in seconds.
      device_index: pyaudio input device index, or None for the default.
    """
    super().__init__()
    self.p = pyaudio.PyAudio()
    chunk_size = int(packet_time * rate)  # samples per recv() packet
    self.stream = self.p.open(format=audio_format,
                              channels=channels,
                              rate=rate,
                              frames_per_buffer=chunk_size,
                              input=True,
                              input_device_index=device_index)
    self.format = audio_format
    self.rate = rate
    self.channels = channels
    self.packet_time = packet_time
    self.chunk_size = chunk_size
    self.pts = 0  # running presentation timestamp, in samples

  async def recv(self):
    """Read one chunk from the microphone and return it as an av.AudioFrame.

    NOTE: stream.read() is a blocking call, so this stalls the event loop
    for up to packet_time per packet.
    """
    mic_data = self.stream.read(self.chunk_size)
    # FIX: pick the numpy dtype from the configured sample format instead of
    # hard-coding int16, which corrupted captures for any other format.
    dtype = self.PYAUDIO_TO_NP_DTYPE_MAP.get(self.format, np.int16)
    mic_array = np.frombuffer(mic_data, dtype=dtype)
    mic_array = np.expand_dims(mic_array, axis=0)  # (1, samples), interleaved
    layout = 'stereo' if self.channels > 1 else 'mono'
    frame = av.AudioFrame.from_ndarray(mic_array, format=self.PYAUDIO_TO_AV_FORMAT_MAP[self.format], layout=layout)
    frame.rate = self.rate
    frame.pts = self.pts
    self.pts += frame.samples
    return frame
class AudioOutputSpeaker:
  """Plays incoming aiortc audio tracks through a local speaker via pyaudio.

  Frames pulled from each added track are appended to a shared BytesIO
  buffer by asyncio consumer tasks; pyaudio drains that buffer from its own
  callback thread.
  """

  def __init__(self, audio_format: int = pyaudio.paInt16, rate: int = 48000, channels: int = 2, packet_time: float = 0.2, device_index: Optional[int] = None):
    # sample frames per pyaudio callback invocation
    chunk_size = int(packet_time * rate)
    self.p = pyaudio.PyAudio()
    # shared FIFO between __consume (writer) and __pyaudio_callback (reader)
    self.buffer = io.BytesIO()
    self.channels = channels
    self.stream = self.p.open(format=audio_format,
                              channels=channels,
                              rate=rate,
                              frames_per_buffer=chunk_size,
                              output=True,
                              output_device_index=device_index,
                              stream_callback=self.__pyaudio_callback)
    # (track, consumer task) pairs; the task slot stays None until start()
    self.tracks_and_tasks: List[Tuple[aiortc.MediaStreamTrack, Optional[asyncio.Task]]] = []

  def __pyaudio_callback(self, in_data, frame_count, time_info, status):
    # Runs on pyaudio's callback thread: hand back frame_count frames of PCM.
    # NOTE(review): the byte counts below assume 2-byte (16-bit) samples,
    # which only matches the default paInt16 format — confirm before passing
    # any other audio_format.
    if self.buffer.getbuffer().nbytes < frame_count * self.channels * 2:
      # not enough audio buffered yet: emit silence to keep the stream alive
      buff = b'\x00\x00' * frame_count * self.channels
    elif self.buffer.getbuffer().nbytes > 115200: # 3x the usual read size
      # backlog too large (with defaults, a normal read is 38400 bytes):
      # read double the requested amount and keep only the first half,
      # skipping ahead to reduce playback latency
      self.buffer.seek(0)
      buff = self.buffer.read(frame_count * self.channels * 4)
      buff = buff[:frame_count * self.channels * 2]
      # NOTE(review): seek(2) moves the write position back near the start,
      # so later writes overwrite buffered data — presumably a cheap
      # truncate; verify this is the intended buffer-reset behavior
      self.buffer.seek(2)
    else:
      # steady state: consume exactly one callback's worth of samples
      self.buffer.seek(0)
      buff = self.buffer.read(frame_count * self.channels * 2)
      self.buffer.seek(2)
    return (buff, pyaudio.paContinue)

  async def __consume(self, track):
    # Pull frames from `track` forever, appending raw PCM to the buffer;
    # exits when the track ends.
    while True:
      try:
        frame = await track.recv()
      except aiortc.MediaStreamError:
        return
      # plane 0 holds the interleaved PCM payload of the frame
      self.buffer.write(bytes(frame.planes[0]))

  def hasTrack(self, track: aiortc.MediaStreamTrack) -> bool:
    # True if `track` was previously registered via addTrack()
    return any(t == track for t, _ in self.tracks_and_tasks)

  def addTrack(self, track: aiortc.MediaStreamTrack):
    # Register a track for playback; consumption begins on start()
    if not self.hasTrack(track):
      self.tracks_and_tasks.append((track, None))

  def start(self):
    # Spawn a consumer task for every registered track that lacks one
    for index, (track, task) in enumerate(self.tracks_and_tasks):
      if task is None:
        self.tracks_and_tasks[index] = (track, asyncio.create_task(self.__consume(track)))

  def stop(self):
    # Cancel all consumers, then tear down the pyaudio stream and instance
    for _, task in self.tracks_and_tasks:
      if task is not None:
        task.cancel()
    self.tracks_and_tasks = []
    self.stream.stop_stream()
    self.stream.close()
    self.p.terminate()

View File

@@ -0,0 +1,69 @@
import asyncio
from typing import Optional
import av
from teleoprtc.tracks import TiciVideoStreamTrack
from cereal import messaging
from openpilot.tools.lib.framereader import FrameReader
from openpilot.common.realtime import DT_MDL, DT_DMON
class LiveStreamVideoStreamTrack(TiciVideoStreamTrack):
  """Video track that forwards live encoded camera packets from messaging.

  Each camera type maps to a livestream encode socket; recv() polls that
  socket and wraps the newest encoded payload in an av.Packet.
  """

  camera_to_sock_mapping = {
    "driver": "livestreamDriverEncodeData",
    "wideRoad": "livestreamWideRoadEncodeData",
    "road": "livestreamRoadEncodeData",
  }

  def __init__(self, camera_type: str):
    # the driver camera runs at the DM model rate; the others at DT_MDL
    if camera_type == "driver":
      dt = DT_DMON
    else:
      dt = DT_MDL
    super().__init__(camera_type, dt)
    # conflate=True keeps only the most recent message on the socket
    self._sock = messaging.sub_sock(self.camera_to_sock_mapping[camera_type], conflate=True)
    self._pts = 0

  async def recv(self):
    """Wait for the next encoded packet and return it as an av.Packet."""
    # poll the socket, yielding to the event loop between attempts
    while (msg := messaging.recv_one_or_none(self._sock)) is None:
      await asyncio.sleep(0.005)

    encode_data = getattr(msg, msg.which())
    packet = av.Packet(encode_data.header + encode_data.data)
    packet.time_base = self._time_base
    packet.pts = self._pts

    self.log_debug("track sending frame %s", self._pts)
    self._pts += self._dt * self._clock_rate
    return packet

  def codec_preference(self) -> Optional[str]:
    """The livestream payload is already H264-encoded; prefer that codec."""
    return "H264"
class FrameReaderVideoStreamTrack(TiciVideoStreamTrack):
  """Video track that replays frames from a recorded video file in a loop.

  All frames are decoded eagerly at construction so recv() never blocks on
  file I/O.
  """

  def __init__(self, input_file: str, dt: float = DT_MDL, camera_type: str = "driver"):
    """Decode every frame of input_file as rgb24 for later playback.

    Args:
      input_file: path to the recorded video segment.
      dt: seconds between frames (defaults to the model rate).
      camera_type: camera label passed to the base track.
    """
    super().__init__(camera_type, dt)
    frame_reader = FrameReader(input_file)
    self._frames = [frame_reader.get(i, pix_fmt="rgb24") for i in range(frame_reader.frame_count)]
    # FIX: was len(self.frames) — the attribute assigned above is
    # self._frames, so the original raised AttributeError on construction
    self._frame_count = len(self._frames)
    self._frame_index = 0
    self._pts = 0

  async def recv(self):
    """Return the next frame, wrapping to the start at end of file."""
    self.log_debug("track sending frame %s", self._pts)
    img = self._frames[self._frame_index]

    new_frame = av.VideoFrame.from_ndarray(img, format="rgb24")
    new_frame.pts = self._pts
    new_frame.time_base = self._time_base

    # loop playback forever
    self._frame_index = (self._frame_index + 1) % self._frame_count
    self._pts = await self.next_pts(self._pts)
    return new_frame