add wav2lip stream
parent 39d7aff90a
commit 592312ab8c
@@ -0,0 +1,98 @@
import time
import torch
import numpy as np
import soundfile as sf
import resampy

import queue
from queue import Queue
from io import BytesIO
import multiprocessing as mp

from wav2lip import audio


class LipASR:
    def __init__(self, opt):
        self.opt = opt

        self.fps = opt.fps  # 20 ms per frame
        self.sample_rate = 16000
        self.chunk = self.sample_rate // self.fps  # 320 samples per chunk (20ms * 16000 / 1000)
        self.queue = Queue()
        # self.input_stream = BytesIO()
        self.output_queue = mp.Queue()

        #self.audio_processor = audio_processor
        self.batch_size = opt.batch_size

        self.frames = []
        self.stride_left_size = self.stride_right_size = 10
        self.context_size = 10
        self.audio_feats = []
        self.feat_queue = mp.Queue(5)

        self.warm_up()

    def put_audio_frame(self, audio_chunk):  # 16khz 20ms pcm
        self.queue.put(audio_chunk)

    def __get_audio_frame(self):
        try:
            frame = self.queue.get(block=True, timeout=0.018)
            type = 0
            #print(f'[INFO] get frame {frame.shape}')
        except queue.Empty:
            frame = np.zeros(self.chunk, dtype=np.float32)
            type = 1

        return frame, type

    def get_audio_out(self):  # get origin audio pcm to nerf
        return self.output_queue.get()

    def warm_up(self):
        for _ in range(self.stride_left_size + self.stride_right_size):
            audio_frame, type = self.__get_audio_frame()
            self.frames.append(audio_frame)
            self.output_queue.put((audio_frame, type))
        for _ in range(self.stride_left_size):
            self.output_queue.get()

    def run_step(self):
        ############################################## extract audio feature ##############################################
        # get a frame of audio
        for _ in range(self.batch_size * 2):
            frame, type = self.__get_audio_frame()
            self.frames.append(frame)
            # put to output
            self.output_queue.put((frame, type))
        # context not enough, do not run network.
        if len(self.frames) < self.stride_left_size + self.context_size + self.stride_right_size:
            return

        inputs = np.concatenate(self.frames)  # [N * chunk]
        mel = audio.melspectrogram(inputs)
        #print(mel.shape[0], mel.shape, len(mel[0]), len(self.frames))
        # cut off stride
        left = max(0, self.stride_left_size * 80 / 50)
        right = min(len(mel[0]), len(mel[0]) - self.stride_right_size * 80 / 50)
        mel_idx_multiplier = 80. * 2 / self.fps
        mel_step_size = 16
        i = 0
        mel_chunks = []
        while i < (len(self.frames) - self.stride_left_size - self.stride_right_size) / 2:
            start_idx = int(left + i * mel_idx_multiplier)
            #print(start_idx)
            if start_idx + mel_step_size > len(mel[0]):
                mel_chunks.append(mel[:, len(mel[0]) - mel_step_size:])
            else:
                mel_chunks.append(mel[:, start_idx : start_idx + mel_step_size])
            i += 1
        self.feat_queue.put(mel_chunks)

        # discard the old part to save memory
        self.frames = self.frames[-(self.stride_left_size + self.stride_right_size):]

    def get_next_feat(self, block, timeout):
        return self.feat_queue.get(block, timeout)
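A minimal sketch of how the LipASR class above might be driven, assuming an options object that carries the fps and batch_size fields the constructor reads and that the wav2lip.audio module is importable. The SimpleNamespace options, the worker loop, and the silence input are illustrative, not part of this commit.

# Illustrative driver for LipASR (names and values here are assumptions):
# feed 20 ms chunks of 16 kHz float32 PCM, run feature extraction on a
# background thread, and read 16-frame mel windows back out.
import threading
import types
import numpy as np

opt = types.SimpleNamespace(fps=50, batch_size=16)  # assumed option values
asr = LipASR(opt)

def feature_worker():
    while True:
        asr.run_step()  # consumes batch_size*2 chunks, emits a list of mel windows

threading.Thread(target=feature_worker, daemon=True).start()

# Producer side: push silence as a stand-in for real microphone/TTS audio.
for _ in range(1000):
    asr.put_audio_frame(np.zeros(320, dtype=np.float32))  # 320 samples = 20 ms @ 16 kHz

mel_chunks = asr.get_next_feat(block=True, timeout=1)  # typically batch_size arrays of shape (80, 16)
print(len(mel_chunks), mel_chunks[0].shape)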
@@ -0,0 +1,125 @@
from os import listdir, path
import numpy as np
import scipy, cv2, os, sys, argparse
import json, subprocess, random, string
from tqdm import tqdm
from glob import glob
import torch
import pickle
import face_detection


parser = argparse.ArgumentParser(description='Inference code to lip-sync videos in the wild using Wav2Lip models')
parser.add_argument('--img_size', default=96, type=int)
parser.add_argument('--avatar_id', default='wav2lip_avatar1', type=str)
parser.add_argument('--video_path', default='', type=str)
parser.add_argument('--nosmooth', default=False, action='store_true',
                    help='Prevent smoothing face detections over a short temporal window')
parser.add_argument('--pads', nargs='+', type=int, default=[0, 10, 0, 0],
                    help='Padding (top, bottom, left, right). Please adjust to include chin at least')
parser.add_argument('--face_det_batch_size', type=int,
                    help='Batch size for face detection', default=16)
args = parser.parse_args()

device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Using {} for inference.'.format(device))

def osmakedirs(path_list):
    for path in path_list:
        os.makedirs(path) if not os.path.exists(path) else None

def video2imgs(vid_path, save_path, ext='.png', cut_frame=10000000):
    cap = cv2.VideoCapture(vid_path)
    count = 0
    while True:
        if count > cut_frame:
            break
        ret, frame = cap.read()
        if ret:
            cv2.imwrite(f"{save_path}/{count:08d}.png", frame)
            count += 1
        else:
            break

def read_imgs(img_list):
    frames = []
    print('reading images...')
    for img_path in tqdm(img_list):
        frame = cv2.imread(img_path)
        frames.append(frame)
    return frames

def get_smoothened_boxes(boxes, T):
    for i in range(len(boxes)):
        if i + T > len(boxes):
            window = boxes[len(boxes) - T:]
        else:
            window = boxes[i : i + T]
        boxes[i] = np.mean(window, axis=0)
    return boxes

def face_detect(images):
    detector = face_detection.FaceAlignment(face_detection.LandmarksType._2D,
                                            flip_input=False, device=device)

    batch_size = args.face_det_batch_size

    while 1:
        predictions = []
        try:
            for i in tqdm(range(0, len(images), batch_size)):
                predictions.extend(detector.get_detections_for_batch(np.array(images[i:i + batch_size])))
        except RuntimeError:
            if batch_size == 1:
                raise RuntimeError('Image too big to run face detection on GPU. Please use the --resize_factor argument')
            batch_size //= 2
            print('Recovering from OOM error; New batch size: {}'.format(batch_size))
            continue
        break

    results = []
    pady1, pady2, padx1, padx2 = args.pads
    for rect, image in zip(predictions, images):
        if rect is None:
            cv2.imwrite('temp/faulty_frame.jpg', image)  # check this frame where the face was not detected.
            raise ValueError('Face not detected! Ensure the video contains a face in all the frames.')

        y1 = max(0, rect[1] - pady1)
        y2 = min(image.shape[0], rect[3] + pady2)
        x1 = max(0, rect[0] - padx1)
        x2 = min(image.shape[1], rect[2] + padx2)

        results.append([x1, y1, x2, y2])

    boxes = np.array(results)
    if not args.nosmooth: boxes = get_smoothened_boxes(boxes, T=5)
    results = [[image[y1: y2, x1:x2], (y1, y2, x1, x2)] for image, (x1, y1, x2, y2) in zip(images, boxes)]

    del detector
    return results

if __name__ == "__main__":
    avatar_path = f"./results/avatars/{args.avatar_id}"
    full_imgs_path = f"{avatar_path}/full_imgs"
    face_imgs_path = f"{avatar_path}/face_imgs"
    coords_path = f"{avatar_path}/coords.pkl"
    osmakedirs([avatar_path, full_imgs_path, face_imgs_path])
    print(args)

    #if os.path.isfile(args.video_path):
    video2imgs(args.video_path, full_imgs_path, ext='png')
    input_img_list = sorted(glob(os.path.join(full_imgs_path, '*.[jpJP][pnPN]*[gG]')))

    frames = read_imgs(input_img_list)
    face_det_results = face_detect(frames)
    coord_list = []
    idx = 0
    for frame, coords in face_det_results:
        #x1, y1, x2, y2 = bbox
        resized_crop_frame = cv2.resize(frame, (args.img_size, args.img_size))  #, interpolation=cv2.INTER_LANCZOS4)
        cv2.imwrite(f"{face_imgs_path}/{idx:08d}.png", resized_crop_frame)
        coord_list.append(coords)
        idx = idx + 1

    with open(coords_path, 'wb') as f:
        pickle.dump(coord_list, f)
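A minimal sketch of reading back the artifacts this script writes (full_imgs/, face_imgs/, coords.pkl), assuming the default --avatar_id. The paste-back step is only an illustration of how the saved crops and coordinates could be consumed at inference time; it is not code from this commit.

# Sketch: load the avatar artifacts produced above and paste a 96x96 face
# crop back into its source frame (paths follow the defaults in this script).
import os, glob, pickle
import cv2

avatar_path = "./results/avatars/wav2lip_avatar1"

with open(f"{avatar_path}/coords.pkl", 'rb') as f:
    coord_list = pickle.load(f)  # one (y1, y2, x1, x2) box per frame

full_imgs = sorted(glob.glob(os.path.join(avatar_path, "full_imgs", "*.png")))
face_imgs = sorted(glob.glob(os.path.join(avatar_path, "face_imgs", "*.png")))
assert len(coord_list) == len(face_imgs)

frame = cv2.imread(full_imgs[0])
crop = cv2.imread(face_imgs[0])
y1, y2, x1, x2 = [int(v) for v in coord_list[0]]
# Resize the (possibly re-generated) crop back to the original box size and paste it in.
frame[y1:y2, x1:x2] = cv2.resize(crop, (x2 - x1, y2 - y1))
cv2.imwrite("pasted_preview.png", frame)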
@@ -1,194 +1,205 @@
import asyncio
import json
import logging
import threading
import time
from typing import Tuple, Dict, Optional, Set, Union
from av.frame import Frame
from av.packet import Packet
from av import AudioFrame
import fractions
import numpy as np

AUDIO_PTIME = 0.020  # 20ms audio packetization
VIDEO_CLOCK_RATE = 90000
VIDEO_PTIME = 1 / 25  # 25fps
VIDEO_TIME_BASE = fractions.Fraction(1, VIDEO_CLOCK_RATE)
SAMPLE_RATE = 16000
AUDIO_TIME_BASE = fractions.Fraction(1, SAMPLE_RATE)

#from aiortc.contrib.media import MediaPlayer, MediaRelay
#from aiortc.rtcrtpsender import RTCRtpSender
from aiortc import (
    MediaStreamTrack,
)

logging.basicConfig()
logger = logging.getLogger(__name__)


class PlayerStreamTrack(MediaStreamTrack):
    """
    A video track that returns an animated flag.
    """

    def __init__(self, player, kind):
        super().__init__()  # don't forget this!
        self.kind = kind
        self._player = player
        self._queue = asyncio.Queue()
        if self.kind == 'video':
            self.framecount = 0
            self.lasttime = time.perf_counter()
            self.totaltime = 0

    _start: float
    _timestamp: int

    async def next_timestamp(self) -> Tuple[int, fractions.Fraction]:
        if self.readyState != "live":
            raise Exception

        if self.kind == 'video':
            if hasattr(self, "_timestamp"):
                # self._timestamp = (time.time()-self._start) * VIDEO_CLOCK_RATE
                self._timestamp += int(VIDEO_PTIME * VIDEO_CLOCK_RATE)
                wait = self._start + (self._timestamp / VIDEO_CLOCK_RATE) - time.time()
                if wait > 0:
                    await asyncio.sleep(wait)
            else:
                self._start = time.time()
                self._timestamp = 0
                print('video start:', self._start)
            return self._timestamp, VIDEO_TIME_BASE
        else:  # audio
            if hasattr(self, "_timestamp"):
                # self._timestamp = (time.time()-self._start) * SAMPLE_RATE
                self._timestamp += int(AUDIO_PTIME * SAMPLE_RATE)
                wait = self._start + (self._timestamp / SAMPLE_RATE) - time.time()
                if wait > 0:
                    await asyncio.sleep(wait)
            else:
                self._start = time.time()
                self._timestamp = 0
                print('audio start:', self._start)
            return self._timestamp, AUDIO_TIME_BASE

    async def recv(self) -> Union[Frame, Packet]:
        # frame = self.frames[self.counter % 30]
        self._player._start(self)
        # if self.kind == 'video':
        #     frame = await self._queue.get()
        # else: #audio
        #     if hasattr(self, "_timestamp"):
        #         wait = self._start + self._timestamp / SAMPLE_RATE + AUDIO_PTIME - time.time()
        #         if wait>0:
        #             await asyncio.sleep(wait)
        #         if self._queue.qsize()<1:
        #             #frame = AudioFrame(format='s16', layout='mono', samples=320)
        #             audio = np.zeros((1, 320), dtype=np.int16)
        #             frame = AudioFrame.from_ndarray(audio, layout='mono', format='s16')
        #             frame.sample_rate=16000
        #         else:
        #             frame = await self._queue.get()
        #     else:
        #         frame = await self._queue.get()
        frame = await self._queue.get()
        pts, time_base = await self.next_timestamp()
        frame.pts = pts
        frame.time_base = time_base
        if frame is None:
            self.stop()
            raise Exception
        if self.kind == 'video':
            self.totaltime += (time.perf_counter() - self.lasttime)
            self.framecount += 1
            self.lasttime = time.perf_counter()
            if self.framecount == 100:
                print(f"------actual avg final fps:{self.framecount/self.totaltime:.4f}")
                self.framecount = 0
                self.totaltime = 0
        return frame

    def stop(self):
        super().stop()
        if self._player is not None:
            self._player._stop(self)
            self._player = None


def player_worker_thread(
    quit_event,
    loop,
    container,
    audio_track,
    video_track
):
    container.render(quit_event, loop, audio_track, video_track)


class HumanPlayer:

    def __init__(
        self, nerfreal, format=None, options=None, timeout=None, loop=False, decode=True
    ):
        self.__thread: Optional[threading.Thread] = None
        self.__thread_quit: Optional[threading.Event] = None

        # examine streams
        self.__started: Set[PlayerStreamTrack] = set()
        self.__audio: Optional[PlayerStreamTrack] = None
        self.__video: Optional[PlayerStreamTrack] = None

        self.__audio = PlayerStreamTrack(self, kind="audio")
        self.__video = PlayerStreamTrack(self, kind="video")

        self.__container = nerfreal

    @property
    def audio(self) -> MediaStreamTrack:
        """
        A :class:`aiortc.MediaStreamTrack` instance if the file contains audio.
        """
        return self.__audio

    @property
    def video(self) -> MediaStreamTrack:
        """
        A :class:`aiortc.MediaStreamTrack` instance if the file contains video.
        """
        return self.__video

    def _start(self, track: PlayerStreamTrack) -> None:
        self.__started.add(track)
        if self.__thread is None:
            self.__log_debug("Starting worker thread")
            self.__thread_quit = threading.Event()
            self.__thread = threading.Thread(
                name="media-player",
                target=player_worker_thread,
                args=(
                    self.__thread_quit,
                    asyncio.get_event_loop(),
                    self.__container,
                    self.__audio,
                    self.__video
                ),
            )
            self.__thread.start()

    def _stop(self, track: PlayerStreamTrack) -> None:
        self.__started.discard(track)

        if not self.__started and self.__thread is not None:
            self.__log_debug("Stopping worker thread")
            self.__thread_quit.set()
            self.__thread.join()
            self.__thread = None

        if not self.__started and self.__container is not None:
            #self.__container.close()
            self.__container = None

    def __log_debug(self, msg: str, *args) -> None:
        logger.debug(f"HumanPlayer {msg}", *args)
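For reference, a small worked example of the timestamp arithmetic used by next_timestamp above, using only the constants defined in this file; the frame count n = 250 is an arbitrary illustration. It also shows why the ASR side consumes two audio chunks per video frame: 250 video frames span 10 s at 25 fps, while 250 audio chunks span only 5 s.

# Worked example of the per-frame pts increments (constants from this file):
import fractions

VIDEO_CLOCK_RATE = 90000
VIDEO_PTIME = 1 / 25
SAMPLE_RATE = 16000
AUDIO_PTIME = 0.020

video_step = int(VIDEO_PTIME * VIDEO_CLOCK_RATE)  # 3600 ticks per video frame
audio_step = int(AUDIO_PTIME * SAMPLE_RATE)       # 320 samples per 20 ms audio frame

n = 250  # illustrative frame count
video_seconds = n * video_step * fractions.Fraction(1, VIDEO_CLOCK_RATE)  # 10 s at 25 fps
audio_seconds = n * audio_step * fractions.Fraction(1, SAMPLE_RATE)       # 5 s of audio
print(video_step, audio_step, float(video_seconds), float(audio_seconds))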
import asyncio
import json
import logging
import threading
import time
from typing import Tuple, Dict, Optional, Set, Union
from av.frame import Frame
from av.packet import Packet
from av import AudioFrame
import fractions
import numpy as np

AUDIO_PTIME = 0.020  # 20ms audio packetization
VIDEO_CLOCK_RATE = 90000
VIDEO_PTIME = 1 / 25  # 25fps
VIDEO_TIME_BASE = fractions.Fraction(1, VIDEO_CLOCK_RATE)
SAMPLE_RATE = 16000
AUDIO_TIME_BASE = fractions.Fraction(1, SAMPLE_RATE)

#from aiortc.contrib.media import MediaPlayer, MediaRelay
#from aiortc.rtcrtpsender import RTCRtpSender
from aiortc import (
    MediaStreamTrack,
)

logging.basicConfig()
logger = logging.getLogger(__name__)


class PlayerStreamTrack(MediaStreamTrack):
    """
    A video track that returns an animated flag.
    """

    def __init__(self, player, kind):
        super().__init__()  # don't forget this!
        self.kind = kind
        self._player = player
        self._queue = asyncio.Queue()
        self.timelist = []  # timestamps of recently sent packets
        if self.kind == 'video':
            self.framecount = 0
            self.lasttime = time.perf_counter()
            self.totaltime = 0

    _start: float
    _timestamp: int

    async def next_timestamp(self) -> Tuple[int, fractions.Fraction]:
        if self.readyState != "live":
            raise Exception

        if self.kind == 'video':
            if hasattr(self, "_timestamp"):
                # self._timestamp = (time.time()-self._start) * VIDEO_CLOCK_RATE
                self._timestamp += int(VIDEO_PTIME * VIDEO_CLOCK_RATE)
                # wait = self._start + (self._timestamp / VIDEO_CLOCK_RATE) - time.time()
                wait = self.timelist[0] + len(self.timelist) * VIDEO_PTIME - time.time()
                if wait > 0:
                    await asyncio.sleep(wait)
                self.timelist.append(time.time())
                if len(self.timelist) > 100:
                    self.timelist.pop(0)
            else:
                self._start = time.time()
                self._timestamp = 0
                self.timelist.append(self._start)
                print('video start:', self._start)
            return self._timestamp, VIDEO_TIME_BASE
        else:  # audio
            if hasattr(self, "_timestamp"):
                # self._timestamp = (time.time()-self._start) * SAMPLE_RATE
                self._timestamp += int(AUDIO_PTIME * SAMPLE_RATE)
                # wait = self._start + (self._timestamp / SAMPLE_RATE) - time.time()
                wait = self.timelist[0] + len(self.timelist) * AUDIO_PTIME - time.time()
                if wait > 0:
                    await asyncio.sleep(wait)
                self.timelist.append(time.time())
                if len(self.timelist) > 200:
                    self.timelist.pop(0)
            else:
                self._start = time.time()
                self._timestamp = 0
                self.timelist.append(self._start)
                print('audio start:', self._start)
            return self._timestamp, AUDIO_TIME_BASE

    async def recv(self) -> Union[Frame, Packet]:
        # frame = self.frames[self.counter % 30]
        self._player._start(self)
        # if self.kind == 'video':
        #     frame = await self._queue.get()
        # else: #audio
        #     if hasattr(self, "_timestamp"):
        #         wait = self._start + self._timestamp / SAMPLE_RATE + AUDIO_PTIME - time.time()
        #         if wait>0:
        #             await asyncio.sleep(wait)
        #         if self._queue.qsize()<1:
        #             #frame = AudioFrame(format='s16', layout='mono', samples=320)
        #             audio = np.zeros((1, 320), dtype=np.int16)
        #             frame = AudioFrame.from_ndarray(audio, layout='mono', format='s16')
        #             frame.sample_rate=16000
        #         else:
        #             frame = await self._queue.get()
        #     else:
        #         frame = await self._queue.get()
        frame = await self._queue.get()
        pts, time_base = await self.next_timestamp()
        frame.pts = pts
        frame.time_base = time_base
        if frame is None:
            self.stop()
            raise Exception
        if self.kind == 'video':
            self.totaltime += (time.perf_counter() - self.lasttime)
            self.framecount += 1
            self.lasttime = time.perf_counter()
            if self.framecount == 100:
                print(f"------actual avg final fps:{self.framecount/self.totaltime:.4f}")
                self.framecount = 0
                self.totaltime = 0
        return frame

    def stop(self):
        super().stop()
        if self._player is not None:
            self._player._stop(self)
            self._player = None


def player_worker_thread(
    quit_event,
    loop,
    container,
    audio_track,
    video_track
):
    container.render(quit_event, loop, audio_track, video_track)


class HumanPlayer:

    def __init__(
        self, nerfreal, format=None, options=None, timeout=None, loop=False, decode=True
    ):
        self.__thread: Optional[threading.Thread] = None
        self.__thread_quit: Optional[threading.Event] = None

        # examine streams
        self.__started: Set[PlayerStreamTrack] = set()
        self.__audio: Optional[PlayerStreamTrack] = None
        self.__video: Optional[PlayerStreamTrack] = None

        self.__audio = PlayerStreamTrack(self, kind="audio")
        self.__video = PlayerStreamTrack(self, kind="video")

        self.__container = nerfreal

    @property
    def audio(self) -> MediaStreamTrack:
        """
        A :class:`aiortc.MediaStreamTrack` instance if the file contains audio.
        """
        return self.__audio

    @property
    def video(self) -> MediaStreamTrack:
        """
        A :class:`aiortc.MediaStreamTrack` instance if the file contains video.
        """
        return self.__video

    def _start(self, track: PlayerStreamTrack) -> None:
        self.__started.add(track)
        if self.__thread is None:
            self.__log_debug("Starting worker thread")
            self.__thread_quit = threading.Event()
            self.__thread = threading.Thread(
                name="media-player",
                target=player_worker_thread,
                args=(
                    self.__thread_quit,
                    asyncio.get_event_loop(),
                    self.__container,
                    self.__audio,
                    self.__video
                ),
            )
            self.__thread.start()

    def _stop(self, track: PlayerStreamTrack) -> None:
        self.__started.discard(track)

        if not self.__started and self.__thread is not None:
            self.__log_debug("Stopping worker thread")
            self.__thread_quit.set()
            self.__thread.join()
            self.__thread = None

        if not self.__started and self.__container is not None:
            #self.__container.close()
            self.__container = None

    def __log_debug(self, msg: str, *args) -> None:
        logger.debug(f"HumanPlayer {msg}", *args)