add wav2lip stream
parent 39d7aff90a
commit 592312ab8c
@@ -0,0 +1,98 @@
import time
import torch
import numpy as np
import soundfile as sf
import resampy

import queue
from queue import Queue
from io import BytesIO
import multiprocessing as mp

from wav2lip import audio


class LipASR:
    def __init__(self, opt):
        self.opt = opt

        self.fps = opt.fps  # e.g. 50 fps -> 20 ms per audio frame
        self.sample_rate = 16000
        self.chunk = self.sample_rate // self.fps  # 320 samples per chunk (20ms * 16000 / 1000)
        self.queue = Queue()
        # self.input_stream = BytesIO()
        self.output_queue = mp.Queue()

        # self.audio_processor = audio_processor
        self.batch_size = opt.batch_size

        self.frames = []
        self.stride_left_size = self.stride_right_size = 10
        self.context_size = 10
        self.audio_feats = []
        self.feat_queue = mp.Queue(5)

        self.warm_up()

    def put_audio_frame(self, audio_chunk):  # 16kHz 20ms pcm
        self.queue.put(audio_chunk)

    def __get_audio_frame(self):
        try:
            frame = self.queue.get(block=True, timeout=0.018)
            type = 0
            # print(f'[INFO] get frame {frame.shape}')
        except queue.Empty:
            # no audio arrived in time: substitute 20 ms of silence and flag it
            frame = np.zeros(self.chunk, dtype=np.float32)
            type = 1

        return frame, type

    def get_audio_out(self):  # get original audio pcm to nerf
        return self.output_queue.get()

    def warm_up(self):
        # pre-fill the left/right stride so the first run_step() has context on both sides
        for _ in range(self.stride_left_size + self.stride_right_size):
            audio_frame, type = self.__get_audio_frame()
            self.frames.append(audio_frame)
            self.output_queue.put((audio_frame, type))
        for _ in range(self.stride_left_size):
            self.output_queue.get()

    def run_step(self):
        ############################################## extract audio feature ##############################################
        # get a frame of audio
        for _ in range(self.batch_size * 2):
            frame, type = self.__get_audio_frame()
            self.frames.append(frame)
            # put to output
            self.output_queue.put((frame, type))
        # context not enough, do not run network.
        if len(self.frames) < self.stride_left_size + self.context_size + self.stride_right_size:
            return

        inputs = np.concatenate(self.frames)  # [N * chunk]
        mel = audio.melspectrogram(inputs)
        # print(mel.shape[0], mel.shape, len(mel[0]), len(self.frames))

        # cut off stride: the melspectrogram yields roughly 80 columns per second,
        # so 80/50 converts a count of 20 ms audio frames into mel columns
        left = max(0, self.stride_left_size * 80 / 50)
        right = min(len(mel[0]), len(mel[0]) - self.stride_right_size * 80 / 50)
        mel_idx_multiplier = 80. * 2 / self.fps  # mel columns advanced per 40 ms video step
        mel_step_size = 16                       # 16 mel columns (~0.2 s) per wav2lip window
        i = 0
        mel_chunks = []
        while i < (len(self.frames) - self.stride_left_size - self.stride_right_size) / 2:
            start_idx = int(left + i * mel_idx_multiplier)
            # print(start_idx)
            if start_idx + mel_step_size > len(mel[0]):
                mel_chunks.append(mel[:, len(mel[0]) - mel_step_size:])
            else:
                mel_chunks.append(mel[:, start_idx: start_idx + mel_step_size])
            i += 1
        self.feat_queue.put(mel_chunks)

        # discard the old part to save memory
        self.frames = self.frames[-(self.stride_left_size + self.stride_right_size):]

    def get_next_feat(self, block, timeout):
        return self.feat_queue.get(block, timeout)
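Not part of the commit, but a minimal driver sketch for the class above may help. It assumes an opt namespace carrying only the fps and batch_size fields the constructor reads (values below are illustrative) and that the wav2lip audio module is importable: a producer pushes 20 ms float32 PCM chunks, run_step() turns them into mel windows for the lip model, and the pass-through PCM comes back out via get_audio_out().

# Illustrative driver, not part of the commit; opt fields and values are assumed.
from types import SimpleNamespace
import numpy as np

opt = SimpleNamespace(fps=50, batch_size=16)      # 50 fps -> 20 ms audio frames
asr = LipASR(opt)                                 # warm_up() pre-fills the stride with silence

# producer side: push 20 ms float32 PCM chunks (e.g. from TTS or a microphone)
for _ in range(opt.fps):                          # one second of silence here
    asr.put_audio_frame(np.zeros(asr.chunk, dtype=np.float32))

# driver side: each run_step() consumes batch_size*2 chunks and, once enough
# context has accumulated, puts a list of (80, 16) mel windows on feat_queue
asr.run_step()
mel_chunks = asr.get_next_feat(block=True, timeout=1)

# the original PCM (plus a 0/1 silence flag) comes back out for muxing into the stream
pcm, is_silence = asr.get_audio_out()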
@@ -0,0 +1,125 @@
from os import listdir, path
import numpy as np
import scipy, cv2, os, sys, argparse
import json, subprocess, random, string
from tqdm import tqdm
from glob import glob
import torch
import pickle
import face_detection


parser = argparse.ArgumentParser(description='Inference code to lip-sync videos in the wild using Wav2Lip models')
parser.add_argument('--img_size', default=96, type=int)
parser.add_argument('--avatar_id', default='wav2lip_avatar1', type=str)
parser.add_argument('--video_path', default='', type=str)
parser.add_argument('--nosmooth', default=False, action='store_true',
                    help='Prevent smoothing face detections over a short temporal window')
parser.add_argument('--pads', nargs='+', type=int, default=[0, 10, 0, 0],
                    help='Padding (top, bottom, left, right). Please adjust to include chin at least')
parser.add_argument('--face_det_batch_size', type=int,
                    help='Batch size for face detection', default=16)
args = parser.parse_args()

device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Using {} for inference.'.format(device))


def osmakedirs(path_list):
    for path in path_list:
        os.makedirs(path) if not os.path.exists(path) else None


def video2imgs(vid_path, save_path, ext='.png', cut_frame=10000000):
    cap = cv2.VideoCapture(vid_path)
    count = 0
    while True:
        if count > cut_frame:
            break
        ret, frame = cap.read()
        if ret:
            cv2.imwrite(f"{save_path}/{count:08d}.png", frame)
            count += 1
        else:
            break


def read_imgs(img_list):
    frames = []
    print('reading images...')
    for img_path in tqdm(img_list):
        frame = cv2.imread(img_path)
        frames.append(frame)
    return frames


def get_smoothened_boxes(boxes, T):
    # moving average over a window of T boxes to damp face-detection jitter
    for i in range(len(boxes)):
        if i + T > len(boxes):
            window = boxes[len(boxes) - T:]
        else:
            window = boxes[i: i + T]
        boxes[i] = np.mean(window, axis=0)
    return boxes


def face_detect(images):
    detector = face_detection.FaceAlignment(face_detection.LandmarksType._2D,
                                            flip_input=False, device=device)

    batch_size = args.face_det_batch_size

    while 1:
        predictions = []
        try:
            for i in tqdm(range(0, len(images), batch_size)):
                predictions.extend(detector.get_detections_for_batch(np.array(images[i:i + batch_size])))
        except RuntimeError:
            if batch_size == 1:
                raise RuntimeError('Image too big to run face detection on GPU. Please use the --resize_factor argument')
            # halve the batch size and retry after a GPU out-of-memory error
            batch_size //= 2
            print('Recovering from OOM error; New batch size: {}'.format(batch_size))
            continue
        break

    results = []
    pady1, pady2, padx1, padx2 = args.pads
    for rect, image in zip(predictions, images):
        if rect is None:
            cv2.imwrite('temp/faulty_frame.jpg', image)  # check this frame where the face was not detected.
            raise ValueError('Face not detected! Ensure the video contains a face in all the frames.')

        y1 = max(0, rect[1] - pady1)
        y2 = min(image.shape[0], rect[3] + pady2)
        x1 = max(0, rect[0] - padx1)
        x2 = min(image.shape[1], rect[2] + padx2)

        results.append([x1, y1, x2, y2])

    boxes = np.array(results)
    if not args.nosmooth: boxes = get_smoothened_boxes(boxes, T=5)
    results = [[image[y1: y2, x1:x2], (y1, y2, x1, x2)] for image, (x1, y1, x2, y2) in zip(images, boxes)]

    del detector
    return results


if __name__ == "__main__":
    avatar_path = f"./results/avatars/{args.avatar_id}"
    full_imgs_path = f"{avatar_path}/full_imgs"
    face_imgs_path = f"{avatar_path}/face_imgs"
    coords_path = f"{avatar_path}/coords.pkl"
    osmakedirs([avatar_path, full_imgs_path, face_imgs_path])
    print(args)

    # if os.path.isfile(args.video_path):
    video2imgs(args.video_path, full_imgs_path, ext='png')
    input_img_list = sorted(glob(os.path.join(full_imgs_path, '*.[jpJP][pnPN]*[gG]')))

    frames = read_imgs(input_img_list)
    face_det_results = face_detect(frames)
    coord_list = []
    idx = 0
    for frame, coords in face_det_results:
        # x1, y1, x2, y2 = bbox
        resized_crop_frame = cv2.resize(frame, (args.img_size, args.img_size))  # , interpolation=cv2.INTER_LANCZOS4
        cv2.imwrite(f"{face_imgs_path}/{idx:08d}.png", resized_crop_frame)
        coord_list.append(coords)
        idx = idx + 1

    with open(coords_path, 'wb') as f:
        pickle.dump(coord_list, f)
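As a usage note (nothing below is taken from the diff; the script filename genavatar.py is an assumption): running something like python genavatar.py --video_path input.mp4 --avatar_id wav2lip_avatar1 writes the avatar material under ./results/avatars/<avatar_id>/, with full frames in full_imgs/, img_size x img_size face crops in face_imgs/, and one crop box per frame in coords.pkl. A short sanity-check sketch for that output, mirroring the paths built in __main__ above:

# Sketch only: reload the material the script writes and eyeball the alignment.
import glob
import pickle
import cv2

avatar_path = "./results/avatars/wav2lip_avatar1"    # illustrative avatar_id
with open(f"{avatar_path}/coords.pkl", 'rb') as f:
    coord_list = pickle.load(f)                      # one (y1, y2, x1, x2) box per frame

face_imgs = sorted(glob.glob(f"{avatar_path}/face_imgs/*.png"))
full_imgs = sorted(glob.glob(f"{avatar_path}/full_imgs/*.png"))
assert len(coord_list) == len(face_imgs) == len(full_imgs)

# paste the first resized crop back into its frame to check the stored box
frame = cv2.imread(full_imgs[0])
y1, y2, x1, x2 = coord_list[0]
crop = cv2.resize(cv2.imread(face_imgs[0]), (x2 - x1, y2 - y1))
frame[y1:y2, x1:x2] = crop
cv2.imwrite("check_paste.png", frame)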
@@ -1,194 +1,205 @@
import asyncio
import json
import logging
import threading
import time
from typing import Tuple, Dict, Optional, Set, Union
from av.frame import Frame
from av.packet import Packet
from av import AudioFrame
import fractions
import numpy as np

AUDIO_PTIME = 0.020  # 20ms audio packetization
VIDEO_CLOCK_RATE = 90000
VIDEO_PTIME = 1 / 25  # 25fps
VIDEO_TIME_BASE = fractions.Fraction(1, VIDEO_CLOCK_RATE)
SAMPLE_RATE = 16000
AUDIO_TIME_BASE = fractions.Fraction(1, SAMPLE_RATE)

# from aiortc.contrib.media import MediaPlayer, MediaRelay
# from aiortc.rtcrtpsender import RTCRtpSender
from aiortc import (
    MediaStreamTrack,
)

logging.basicConfig()
logger = logging.getLogger(__name__)


class PlayerStreamTrack(MediaStreamTrack):
    """
    An audio or video track fed by the HumanPlayer worker thread.
    """

    def __init__(self, player, kind):
        super().__init__()  # don't forget this!
        self.kind = kind
        self._player = player
        self._queue = asyncio.Queue()
        self.timelist = []  # timestamps of the most recently sent packets
        if self.kind == 'video':
            self.framecount = 0
            self.lasttime = time.perf_counter()
            self.totaltime = 0

    _start: float
    _timestamp: int

    async def next_timestamp(self) -> Tuple[int, fractions.Fraction]:
        if self.readyState != "live":
            raise Exception

        if self.kind == 'video':
            if hasattr(self, "_timestamp"):
                # self._timestamp = (time.time()-self._start) * VIDEO_CLOCK_RATE
                self._timestamp += int(VIDEO_PTIME * VIDEO_CLOCK_RATE)
                # pace against the oldest remembered send time instead of the stream start:
                # wait = self._start + (self._timestamp / VIDEO_CLOCK_RATE) - time.time()
                wait = self.timelist[0] + len(self.timelist) * VIDEO_PTIME - time.time()
                if wait > 0:
                    await asyncio.sleep(wait)
                self.timelist.append(time.time())
                if len(self.timelist) > 100:
                    self.timelist.pop(0)
            else:
                self._start = time.time()
                self._timestamp = 0
                self.timelist.append(self._start)
                print('video start:', self._start)
            return self._timestamp, VIDEO_TIME_BASE
        else:  # audio
            if hasattr(self, "_timestamp"):
                # self._timestamp = (time.time()-self._start) * SAMPLE_RATE
                self._timestamp += int(AUDIO_PTIME * SAMPLE_RATE)
                # wait = self._start + (self._timestamp / SAMPLE_RATE) - time.time()
                wait = self.timelist[0] + len(self.timelist) * AUDIO_PTIME - time.time()
                if wait > 0:
                    await asyncio.sleep(wait)
                self.timelist.append(time.time())
                if len(self.timelist) > 200:
                    self.timelist.pop(0)
            else:
                self._start = time.time()
                self._timestamp = 0
                self.timelist.append(self._start)
                print('audio start:', self._start)
            return self._timestamp, AUDIO_TIME_BASE

    async def recv(self) -> Union[Frame, Packet]:
        self._player._start(self)
        frame = await self._queue.get()
        if frame is None:
            # end-of-stream sentinel: stop before trying to stamp the frame
            self.stop()
            raise Exception
        pts, time_base = await self.next_timestamp()
        frame.pts = pts
        frame.time_base = time_base
        if self.kind == 'video':
            self.totaltime += (time.perf_counter() - self.lasttime)
            self.framecount += 1
            self.lasttime = time.perf_counter()
            if self.framecount == 100:
                print(f"------actual avg final fps:{self.framecount/self.totaltime:.4f}")
                self.framecount = 0
                self.totaltime = 0
        return frame

    def stop(self):
        super().stop()
        if self._player is not None:
            self._player._stop(self)
            self._player = None


def player_worker_thread(
    quit_event,
    loop,
    container,
    audio_track,
    video_track
):
    container.render(quit_event, loop, audio_track, video_track)


class HumanPlayer:

    def __init__(
        self, nerfreal, format=None, options=None, timeout=None, loop=False, decode=True
    ):
        self.__thread: Optional[threading.Thread] = None
        self.__thread_quit: Optional[threading.Event] = None

        # examine streams
        self.__started: Set[PlayerStreamTrack] = set()
        self.__audio: Optional[PlayerStreamTrack] = None
        self.__video: Optional[PlayerStreamTrack] = None

        self.__audio = PlayerStreamTrack(self, kind="audio")
        self.__video = PlayerStreamTrack(self, kind="video")

        self.__container = nerfreal

    @property
    def audio(self) -> MediaStreamTrack:
        """
        A :class:`aiortc.MediaStreamTrack` instance if the file contains audio.
        """
        return self.__audio

    @property
    def video(self) -> MediaStreamTrack:
        """
        A :class:`aiortc.MediaStreamTrack` instance if the file contains video.
        """
        return self.__video

    def _start(self, track: PlayerStreamTrack) -> None:
        self.__started.add(track)
        if self.__thread is None:
            self.__log_debug("Starting worker thread")
            self.__thread_quit = threading.Event()
            self.__thread = threading.Thread(
                name="media-player",
                target=player_worker_thread,
                args=(
                    self.__thread_quit,
                    asyncio.get_event_loop(),
                    self.__container,
                    self.__audio,
                    self.__video
                ),
            )
            self.__thread.start()

    def _stop(self, track: PlayerStreamTrack) -> None:
        self.__started.discard(track)

        if not self.__started and self.__thread is not None:
            self.__log_debug("Stopping worker thread")
            self.__thread_quit.set()
            self.__thread.join()
            self.__thread = None

        if not self.__started and self.__container is not None:
            # self.__container.close()
            self.__container = None

    def __log_debug(self, msg: str, *args) -> None:
        logger.debug(f"HumanPlayer {msg}", *args)
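A minimal sketch of how these tracks are meant to be consumed on the WebRTC side (not part of the commit): it assumes a nerfreal object that implements the render(quit_event, loop, audio_track, video_track) method called by player_worker_thread and pushes av frames into each track's _queue; the SDP signalling exchange with the browser is omitted.

# Sketch only: attach the HumanPlayer tracks to an aiortc peer connection.
from aiortc import RTCPeerConnection


async def run(nerfreal):
    pc = RTCPeerConnection()

    player = HumanPlayer(nerfreal)   # creates one audio and one video PlayerStreamTrack
    pc.addTrack(player.audio)
    pc.addTrack(player.video)

    # The worker thread (and nerfreal.render) only starts on the first recv(),
    # i.e. once aiortc begins pulling frames after the browser answers.
    offer = await pc.createOffer()
    await pc.setLocalDescription(offer)
    # ... exchange SDP with the browser, then await pc.setRemoteDescription(answer)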