This is the current script, which outputs the difference between the current stream frame and a reference image:
#!/usr/bin/env python
# GST_DEBUG=3,python:5,gnl*:5 python 01_parsepipeline.py http://www.ustream.tv/channel/17074538 worst novideo.png
from __future__ import print_function

import sys

import gi
gi.require_version("Gst", "1.0")  # must be called before importing Gst from gi.repository
from gi.repository import GObject as gobject, Gst as gst

from livestreamer import Livestreamer, StreamError, PluginError, NoPluginError

import cv2
import numpy
def exit(msg):
    print(msg, file=sys.stderr)
    sys.exit()
class Player(object):
    def __init__(self):
        self.fd = None
        self.mainloop = gobject.MainLoop()

        # This builds a uridecodebin-based pipeline; through the appsrc source
        # we can feed it our stream data
        self.pipeline = gst.parse_launch('uridecodebin uri=appsrc:// name=decoder \
            decoder. ! videorate ! video/x-raw,framerate=1/1 ! tee name=t \
            t. ! queue ! videoconvert ! video/x-raw,format=RGB ! appsink name=appsink \
            decoder. ! queue ! audioconvert ! fakesink')
        if self.pipeline is None:
            exit("couldn't build pipeline")

        decoder = self.pipeline.get_by_name('decoder')
        if decoder is None:
            exit("couldn't get decoder")
        decoder.connect("source-setup", self.on_source_setup)

        vsink = self.pipeline.get_by_name('appsink')
        if vsink is None:
            exit("couldn't get sink")
        vsink.set_property("emit-signals", True)
        vsink.set_property("max-buffers", 1)
        vsink.connect("new-sample", self.on_new_sample)

        # Create a bus and set callbacks to receive errors
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect("message::eos", self.on_eos)
        self.bus.connect("message::error", self.on_error)
def on_new_sample(self, sink):
sample = sink.emit("pull-sample")
buf = sample.get_buffer()
caps = sample.get_caps()
height = caps.get_structure(0).get_value('height')
width = caps.get_structure(0).get_value('width')
(result, mapinfo) = buf.map(gst.MapFlags.READ)
if result == True:
arr = numpy.ndarray(
(height,
width,
3),
buffer=buf.extract_dup(0, buf.get_size()),
dtype=numpy.uint8)
resized_refimage = cv2.resize(refArray, (width, height))
diff = cv2.norm(arr, resized_refimage, cv2.NORM_L2)
buf.unmap(mapinfo)
s = "diff = " + str(diff)
print(s)
return gst.FlowReturn.OK
    def exit(self, msg):
        self.stop()
        exit(msg)

    def stop(self):
        # Stop playback and exit mainloop
        self.pipeline.set_state(gst.State.NULL)
        self.mainloop.quit()

        # Close the stream
        if self.fd:
            self.fd.close()
    def play(self, stream):
        # Attempt to open the stream
        try:
            self.fd = stream.open()
        except StreamError as err:
            self.exit("Failed to open stream: {0}".format(err))

        # Start playback
        self.pipeline.set_state(gst.State.PLAYING)
        self.mainloop.run()
    def on_source_setup(self, element, source):
        # When this callback is called the appsrc expects
        # us to feed it more data
        print("source setup")
        source.connect("need-data", self.on_source_need_data)
        print("done")

    def on_pad_added(self, element, pad):
        # Note: this handler is never connected and self.vconverter is never
        # defined; it is left over from an earlier, manually linked pipeline
        string = pad.query_caps(None).to_string()
        print(string)
        if string.startswith('video/'):
            #type = pad.get_caps()[0].get_name()
            #print(type)
            #if type.startswith("video"):
            pad.link(self.vconverter.get_static_pad("sink"))
    def on_source_need_data(self, source, length):
        # Attempt to read data from the stream
        try:
            data = self.fd.read(length)
        except IOError as err:
            self.exit("Failed to read data from stream: {0}".format(err))

        # If data is empty it's the end of stream
        if not data:
            source.emit("end-of-stream")
            return

        # Convert the Python bytes into a GStreamer Buffer
        # and then push it to the appsrc
        buf = gst.Buffer.new_wrapped(data)
        source.emit("push-buffer", buf)
        #print("sent " + str(length) + " bytes")
    def on_eos(self, bus, msg):
        # Stop playback on end of stream
        self.stop()

    def on_error(self, bus, msg):
        # Print error message and exit on error
        error = msg.parse_error()[1]
        self.exit(error)
def main():
    if len(sys.argv) < 4:
        exit("Usage: {0} <url> <quality> <reference png image path>".format(sys.argv[0]))

    # Initialize GStreamer (the required version is declared at import time above)
    gobject.threads_init()
    gst.init(None)

    # Collect arguments
    url = sys.argv[1]
    quality = sys.argv[2]
    refImage = sys.argv[3]

    # Load the reference image, convert it to RGB (to match the appsink caps)
    # and blur it slightly before comparing
    global refArray
    image = cv2.imread(refImage)
    refArray = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # refArray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    refArray = cv2.blur(refArray, (3, 3))

    # Create the Livestreamer session
    livestreamer = Livestreamer()

    # Enable logging
    livestreamer.set_loglevel("debug")
    livestreamer.set_logoutput(sys.stdout)

    # Attempt to fetch streams
    try:
        streams = livestreamer.streams(url)
    except NoPluginError:
        exit("Livestreamer is unable to handle the URL '{0}'".format(url))
    except PluginError as err:
        exit("Plugin error: {0}".format(err))

    if not streams:
        exit("No streams found on URL '{0}'".format(url))

    # Look for specified stream
    if quality not in streams:
        exit("Unable to find '{0}' stream on URL '{1}'".format(quality, url))

    # We found the stream
    stream = streams[quality]

    # Create the player and start playback
    player = Player()

    # Blocks until playback is done
    player.play(stream)


if __name__ == "__main__":
    main()
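As a sanity check, the comparison done in on_new_sample can be exercised without a live stream. The sketch below is mine, not part of the project files; it uses two synthetic frames (a black "frame" and a white "reference") purely to show the resize-then-L2-norm step in isolation.

# Standalone check of the diff computation used in on_new_sample,
# using two synthetic RGB frames instead of a live stream.
import cv2
import numpy

height, width = 240, 426
frame = numpy.zeros((height, width, 3), dtype=numpy.uint8)      # stand-in for the appsink frame
reference = numpy.full((480, 852, 3), 255, dtype=numpy.uint8)   # stand-in for the reference PNG

# Same steps as the script: resize the reference to the frame size,
# then take the L2 norm of the difference.
resized = cv2.resize(reference, (width, height))
diff = cv2.norm(frame, resized, cv2.NORM_L2)
print("diff =", diff)

A black frame against a white reference gives the largest possible diff for that resolution, which is a convenient upper bound when choosing a threshold for "the stream does not match the reference".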
Unfortunately, it doesn't really work on the Pi. Probably relevant warnings and errors:

From livestreamer:

[plugin.ustreamtv][warning] python-librtmp is not installed, but is needed to access the desktop streams

From gstreamer:

0:00:17.111833668 9423 0xb0641520 ERROR vaapidecode ../../../gst/vaapi/gstvaapidecode.c:1025:gst_vaapidecode_ensure_allowed_caps: failed to retrieve VA display

and:

0:00:17.130139346 9423 0xb0641520 WARN uridecodebin gsturidecodebin.c:939:unknown_type_cb:<decoder> warning: No decoder available for type 'video/x-h264, stream-format=(string)byte-stream, alignment=(string)nal, width=(int)426, height=(int)240, framerate=(fraction)30/1, parsed=(boolean)true, pixel-aspect-ratio=(fraction)1/1, level=(string)2.1, profile=(string)main'.
python-librtmp couldn't be found, so I couldn't install it. The error about a "VA display" seems to be related to H.264 decoding (according to Google), so fixing H.264 decoding might solve that. I just don't know how.
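To narrow down which decoder is missing, a quick check like the one below (my own sketch, not part of the project script) lists which H.264 decoder elements this GStreamer installation actually provides. The element names are only candidates I would look for: omxh264dec from gstreamer1.0-omx on the Pi, avdec_h264 from gstreamer1.0-libav, and vaapidecode from the VAAPI plugin that produces the "VA display" error.

#!/usr/bin/env python
# Check which H.264-capable decoder elements this GStreamer installation provides.
from __future__ import print_function
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst

Gst.init(None)

# Candidate element names; which ones exist depends on the installed
# gstreamer1.0-* packages (omx on the Pi, libav, vaapi, ...).
for name in ("omxh264dec", "avdec_h264", "vaapidecode"):
    factory = Gst.ElementFactory.find(name)
    if factory is None:
        print("{0}: not found".format(name))
    else:
        print("{0}: available, rank {1}".format(name, factory.get_rank()))

If none of these is available, installing gstreamer1.0-libav (for avdec_h264) or gstreamer1.0-omx (for the Pi's hardware decoder) should give uridecodebin something to pick, which is what the "No decoder available for video/x-h264" warning is complaining about.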
The reference image I use is in the project files.
Discussions
sudo apt-get install librtmp-dev
sudo pip install python-librtmp
Found by "pip search rtmp"
Thank you, python-librtmp is now installed (needed libffi-dev as well). I don't know how you did that, but now the script stopped working on my laptop as well. With a different error, though.
Oh, right, needed libffi for paramiko.
Wait, you installed stuff on your laptop and it stopped working? Or are you hinting at my well-hidden supernatural powers in electronics? ;-)
I installed librtmp-dev on the Pi and it stopped working on both the Pi and the laptop. So your supernatural powers are definitely involved. Maybe some other change affected both systems, though.