@ -431,16 +431,25 @@ class GStreamerBackend(PlayerBackend):
# Hardware decode (NV12): insert a videoscale → capsfilter chain inside a
# GstBin before the appsink so playbin accepts it as a single video-sink.
#
# videoscale(method=nearest-neighbour) — scales 1920×1080 → 640×480.
# Nearest-neighbour skips ~56% of source rows so only ~44% of the
# source cache lines are fetched. This is cheaper than the full
# 3.1 MB memmove (which loads 100% of cache lines), cutting the
# Python memmove from ~32 ms to ~1 ms at the cost of some SW scale
# CPU time (~14 ms estimated for nearest vs 32 ms for bilinear).
# videoscale(method=nearest-neighbour, add-borders=True) scales the
# decoded source to fit within the video area while preserving the
# original aspect ratio. Black NV12 borders fill any leftover space
# (letterbox / pillarbox), avoiding any stretch distortion.
# Nearest-neighbour skips ~56% of source rows so only ~44% of source
# cache lines are fetched; Python memmove drops from ~32 ms to ~1 ms.
#
# capsfilter — enforces the output NV12 dimensions.
app_w , app_h = self . _viewport [ 0 ] , self . _viewport [ 1 ]
scale_w , scale_h = ( app_w or 640 ) , ( app_h or 480 )
# Use the actual video area inside the HUD (full window minus margins)
# so the scale target matches the drawable region exactly.
# Dimensions are rounded down to even numbers (NV12 chroma subsampling
# requires both width and height to be divisible by 2).
vp_w , vp_h , vp_top , vp_bottom , vp_left , vp_right = self . _viewport
video_w = max ( 2 , vp_w - vp_left - vp_right )
video_h = max ( 2 , vp_h - vp_top - vp_bottom )
scale_w = ( video_w / / 2 ) * 2
scale_h = ( video_h / / 2 ) * 2
if scale_w < 2 or scale_h < 2 :
scale_w , scale_h = 640 , 480
log . info ( " NV12 appsink: videoscale(nearest) → %d x %d before appsink " , scale_w , scale_h )
scale = self . _gst . ElementFactory . make ( " videoscale " , " vscale " )
@ -454,11 +463,15 @@ class GStreamerBackend(PlayerBackend):
# nearest-neighbour: accesses only the source pixels needed for each
# output sample (strided reads), skipping ~56% of source rows entirely.
# add-borders=True: letterbox/pillarbox to preserve the source aspect
# ratio instead of stretching to fill the target dimensions.
scale . set_property ( " method " , 0 )
scale . set_property ( " add-borders " , True )
capsfilter . set_property (
" caps " ,
self . _gst . Caps . from_string (
f " video/x-raw,format=NV12,width= { scale_w } ,height= { scale_h } "
f " video/x-raw,format=NV12,width= { scale_w } ,height= { scale_h } , "
f " pixel-aspect-ratio=1/1 "
) ,
)