The program is written in Qt, taking live video from a webcam through the QCamera class and passing it to a subclass of QAbstractVideoSurface, which hands the incoming QVideoFrame objects to a QOpenGLWidget subclass. To start the process, we ask the user to choose an available webcam and then create our QCamera object:

// BUILD A LIST OF AVAILABLE CAMERAS FOR THE USER TO CHOOSE FROM
QStringList strings;
QList<QCameraInfo> cameras = QCameraInfo::availableCameras();
for (int n = 0; n < cameras.count(); n++) {
    strings << cameras.at(n).description();
}

// ASK THE USER TO PICK A CAMERA WHEN MORE THAN ONE IS AVAILABLE
if (strings.count() > 1) {
    bool okay = false;
    QString string = QInputDialog::getItem(this, QString("Select Camera"), 
                     QString("Select input device"), strings, 0, false, &okay);
    if (okay) {
        int n = strings.indexOf(string);
        camera = new QCamera(cameras.at(n));
    }
} else if (strings.count() == 1) {
    camera = new QCamera(cameras.first());
}

From here, we create our custom OpenGL widget along with our custom video surface, and hand the surface to the camera as its viewfinder using the following code:

label = new LAURandomizePixelsGLWidget();       // CREATE AN OPENGLWIDGET FOR SHUFFLING PIXELS
this->layout()->addWidget(label);               // ADD THIS OPENGLWIDGET TO OUR MAIN WIDGET

surface = new LAUVideoSurface();                // CREATE OUR CUSTOM VIEWFINDER OBJECT
surface->setLabel(label);                       // GIVE IT A COPY OF OUR OPENGLWIDGET'S POINTER

camera->setViewfinder(surface);                 // HAND OUR VIEWFINDER OFF TO THE CAMERA
camera->setCaptureMode(QCamera::CaptureVideo);  // TELL THE CAMERA TO RUN IN VIDEO MODE

The role of our video surface is simply to pass the video frames from the camera to our OpenGL widget, which will do all the work of shuffling pixels.  So we define our custom surface with the following header:

class LAUVideoSurface : public QAbstractVideoSurface
{
    Q_OBJECT

public:
    explicit LAUVideoSurface(QObject *parent = NULL) : QAbstractVideoSurface(parent), 
                                                       labelWidget(NULL) { ; }

    LAUVideoGLWidget *label() const
    {
        return (labelWidget);
    }

    void setLabel(LAUVideoGLWidget *lbl)
    {
        labelWidget = lbl;
    }

    QVideoSurfaceFormat nearestFormat(const QVideoSurfaceFormat &format) const;
    bool isFormatSupported(const QVideoSurfaceFormat &format) const;
    bool present(const QVideoFrame &frame);
    bool start(const QVideoSurfaceFormat &format);
    void stop();

    QList<QVideoFrame::PixelFormat> supportedPixelFormats(QAbstractVideoBuffer::HandleType type = 
                                                          QAbstractVideoBuffer::NoHandle) const;

private:
    LAUVideoGLWidget *labelWidget;
};

So the setLabel() method takes the QOpenGLWidget from the user and keeps a local copy of its pointer.  The nearestFormat(), isFormatSupported(), start(), and stop() methods are boilerplate; the one of interest is the present() method.  This method receives an incoming video frame from the QCamera and passes it to our OpenGL widget as follows:

bool LAUVideoSurface::present(const QVideoFrame &frame)
{
    // SEND THE IN-COMING VIDEO TO THE LABEL WIDGET, IF IT EXISTS
    if (labelWidget) {
        labelWidget->setFrame(frame);
    }
    return (true);
}
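
The remaining methods really are boilerplate, so we won't dwell on them; a minimal sketch, assuming we only advertise the ARGB32 format that setFrame() handles below, would be:

QList<QVideoFrame::PixelFormat> LAUVideoSurface::supportedPixelFormats(QAbstractVideoBuffer::HandleType type) const
{
    // ONLY ADVERTISE CPU-MAPPABLE ARGB32 FRAMES
    if (type == QAbstractVideoBuffer::NoHandle) {
        return (QList<QVideoFrame::PixelFormat>() << QVideoFrame::Format_ARGB32);
    }
    return (QList<QVideoFrame::PixelFormat>());
}

QVideoSurfaceFormat LAUVideoSurface::nearestFormat(const QVideoSurfaceFormat &format) const
{
    return (format);
}

bool LAUVideoSurface::isFormatSupported(const QVideoSurfaceFormat &format) const
{
    return (supportedPixelFormats(format.handleType()).contains(format.pixelFormat()));
}

bool LAUVideoSurface::start(const QVideoSurfaceFormat &format)
{
    // LET THE BASE CLASS RECORD THE FORMAT AND FLAG THE SURFACE AS ACTIVE
    return (isFormatSupported(format) && QAbstractVideoSurface::start(format));
}

void LAUVideoSurface::stop()
{
    QAbstractVideoSurface::stop();
}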

To display video on screen, we derive our low-level subclass of QOpenGLWidget:

class LAUVideoGLWidget : public QOpenGLWidget, protected QOpenGLFunctions
{
    Q_OBJECT

public:
    explicit LAUVideoGLWidget(QWidget *parent = NULL) : QOpenGLWidget(parent), 
                                                        videoTexture(NULL), counter(0) { ; }
    ~LAUVideoGLWidget();

    virtual bool isValid() const
    {
        return (wasInitialized());
    }

    bool wasInitialized() const
    {
        return (vertexArrayObject.isCreated());
    }

    void setFrame(const QVideoFrame &frame);
    void setFrame(QImage frame);

    virtual void process() { ; }
    virtual void initialize();
    virtual void resize(int w, int h);
    virtual void paint();

protected:
    void initializeGL()
    {
        initialize();
    }

    void resizeGL(int w, int h)
    {
        resize(w, h);
    }

    void paintGL()
    {
        paint();
    }

    QOpenGLVertexArrayObject vertexArrayObject;
    QOpenGLBuffer quadVertexBuffer, quadIndexBuffer;
    QOpenGLShaderProgram program;
    QOpenGLTexture *videoTexture;

    int localWidth, localHeight;
    qreal devicePixelRatio;
};

which is responsible for drawing a quadrilateral that fills the widget and then filling that polygon with our incoming video texture. In filter-specific flavors of this class, we simply need to override the initialize() and paint() methods as well as the process() method, which does the filtering. But in this low-level class, we include just enough code to display raw video on screen and let the programmer override it as needed. Specifically, we build the necessary vertex buffer, index buffer, shader program, and textures as follows:

void LAUVideoGLWidget::initialize()
{
    // INITIALIZE OUR GL CALLS AND SET THE CLEAR COLOR
    initializeOpenGLFunctions();
    glClearColor(0.5f, 0.0f, 0.0f, 1.0f);

    // CREATE THE VERTEX ARRAY OBJECT FOR FEEDING VERTICES TO OUR SHADER PROGRAMS
    vertexArrayObject.create();
    vertexArrayObject.bind();

    // CREATE VERTEX BUFFER TO HOLD CORNERS OF QUADRILATERAL
    quadVertexBuffer = QOpenGLBuffer(QOpenGLBuffer::VertexBuffer);
    quadVertexBuffer.create();
    quadVertexBuffer.setUsagePattern(QOpenGLBuffer::StaticDraw);
    if (quadVertexBuffer.bind()) {
        // ALLOCATE THE VERTEX BUFFER FOR HOLDING THE FOUR CORNERS OF A RECTANGLE
        quadVertexBuffer.allocate(16 * sizeof(float));
        float *buffer = (float *)quadVertexBuffer.map(QOpenGLBuffer::WriteOnly);
        if (buffer) {
            buffer[0]  = -1.0;
            buffer[1]  = -1.0;
            buffer[2]  = 0.0;
            buffer[3]  = 1.0;
            buffer[4]  = +1.0;
            buffer[5]  = -1.0;
            buffer[6]  = 0.0;
            buffer[7]  = 1.0;
            buffer[8]  = +1.0;
            buffer[9]  = +1.0;
            buffer[10] = 0.0;
            buffer[11] = 1.0;
            buffer[12] = -1.0;
            buffer[13] = +1.0;
            buffer[14] = 0.0;
            buffer[15] = 1.0;
            quadVertexBuffer.unmap();
        } else {
            qDebug() << QString("quadVertexBuffer not allocated.") << glGetError();
        }
        quadVertexBuffer.release();
    }

    // CREATE INDEX BUFFER TO HOLD THE ORDERING OF VERTICES FORMING THE POLYGON
    quadIndexBuffer = QOpenGLBuffer(QOpenGLBuffer::IndexBuffer);
    quadIndexBuffer.create();
    quadIndexBuffer.setUsagePattern(QOpenGLBuffer::StaticDraw);
    if (quadIndexBuffer.bind()) {
        quadIndexBuffer.allocate(6 * sizeof(unsigned int));
        unsigned int *indices = (unsigned int *)quadIndexBuffer.map(QOpenGLBuffer::WriteOnly);
        if (indices) {
            indices[0] = 0;
            indices[1] = 1;
            indices[2] = 2;
            indices[3] = 0;
            indices[4] = 2;
            indices[5] = 3;
            quadIndexBuffer.unmap();
        } else {
            qDebug() << QString("indiceBufferA buffer mapped from GPU.");
        }
        quadIndexBuffer.release();
    }

    // CREATE SHADER FOR SHOWING THE VIDEO NOT AVAILABLE IMAGE
    setlocale(LC_NUMERIC, "C");
    program.addShaderFromSourceFile(QOpenGLShader::Vertex,   ":/shaders/Shaders/displayRGBVideo.vert");
    program.addShaderFromSourceFile(QOpenGLShader::Fragment, ":/shaders/Shaders/displayRGBVideo.frag");
    program.link();
    setlocale(LC_ALL, "");
}

Again, the vertex buffer stores the four corners of the quadrilateral that will fill the widget.  The index buffer represents the quadrilateral as two triangles.  The method closes by building the shader program.  Note that at this point we don't know the size and format of the incoming video, so we wait until the first frame of video arrives before creating an OpenGL texture to hold the video on the GPU.  The code for receiving this first frame of video is as follows:

void LAUVideoGLWidget::setFrame(const QVideoFrame &frame)
{
    QVideoFrame localFrame = frame;
    if (localFrame.map(QAbstractVideoBuffer::ReadOnly)) {
        makeCurrent();

        // SEE IF WE NEED A NEW TEXTURE TO HOLD THE INCOMING VIDEO FRAME
        if (!videoTexture || 
             videoTexture->width() != localFrame.width() || 
             videoTexture->height() != localFrame.height()) {

            if (videoTexture) {
                delete videoTexture;
            }

            // CREATE THE GPU SIDE TEXTURE BUFFER TO HOLD THE INCOMING VIDEO
            videoTexture = new QOpenGLTexture(QOpenGLTexture::Target2D);
            videoTexture->setSize(localFrame.width(), localFrame.height());
            videoTexture->setFormat(QOpenGLTexture::RGBA32F);
            videoTexture->setWrapMode(QOpenGLTexture::ClampToBorder);
            videoTexture->setMinificationFilter(QOpenGLTexture::Nearest);
            videoTexture->setMagnificationFilter(QOpenGLTexture::Nearest);
            videoTexture->allocateStorage();
        }

        // UPLOAD THE CPU FRAME BUFFER TO THE GPU TEXTURE
        QVideoFrame::PixelFormat format = localFrame.pixelFormat();
        if (format == QVideoFrame::Format_ARGB32) {
            unsigned int bytesPerSample = localFrame.bytesPerLine() / localFrame.width() / 4;
            if (bytesPerSample == sizeof(unsigned char)) {
                videoTexture->setData(QOpenGLTexture::BGRA, QOpenGLTexture::UInt8, 
                                      (const void *)localFrame.bits());
            }
        }
        localFrame.unmap();

        // PROCESS THE TEXTURE
        process();

        // UPDATE THE USER DISPLAY
        update();
    }
}

Note that we perform two tests.  First, we check whether our GPU texture has already been allocated.  If it has, we make sure the size of the allocated texture matches the size of the incoming video frame, in case the user changed the resolution of the incoming video mid-stream.  Once we have an appropriate texture on the GPU, we upload the incoming video buffer to it and call the process() method to do any filtering.  As soon as we know we have a valid frame of video, we could also update an instantaneous frame rate, measured as the time it takes to collect the last 30 frames; the counter member declared above is reserved for that bookkeeping, although the code is omitted here.  Note that it's possible, because of the underlying Qt implementation, that the incoming video is already sitting on the GPU and that the QVideoFrame simply holds a handle to that texture.  So we call the map() method to get a local copy of the video buffer on the CPU before performing the memory copy, and every map() call must be matched with an unmap() call to tell Qt we are done with the buffer.
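
As a sketch of how one could detect the GPU-resident case explicitly (the exact handle semantics vary by platform and multimedia backend), the following test could sit at the top of setFrame():

// SKETCH: SEE IF THE FRAME ALREADY LIVES ON THE GPU AS AN OPENGL TEXTURE
if (frame.handleType() == QAbstractVideoBuffer::GLTextureHandle) {
    GLuint textureId = frame.handle().toUInt();   // DRIVER-OWNED TEXTURE ID
    // ... WE COULD BIND textureId DIRECTLY RATHER THAN MAPPING AND RE-UPLOADING ...
}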

To see how we display the texture on screen, the following is our vertex shader that simply passes our vertices through:

#version 330 core

in  vec4 qt_vertex;       // POINTS TO VERTICES PROVIDED BY USER ON CPU
out vec2 qt_coordinate;   // OUTPUT COORDINATE TO FRAGMENT SHADER

void main(void)
{
    // COPY THE VERTEX COORDINATE TO THE GL POSITION
    gl_Position = qt_vertex;
    qt_coordinate = (vec2(qt_vertex.x, -qt_vertex.y) + 1.0)/2.0;
}

The qt_coordinate variable converts the screen coordinates to texture coordinates, flipping them along the y-axis so the top of the texture displays at the top of the widget.  The fragment shader is then simply:

#version 330 core

uniform sampler2D qt_texture;      // THIS TEXTURE HOLDS THE INCOMING VIDEO FRAME
in           vec2 qt_coordinate;   // HOLDS THE TEXTURE COORDINATE FROM THE VERTEX SHADER

layout(location = 0, index = 0) out vec4 qt_fragColor;

void main()
{
    // SAMPLE THE VIDEO TEXTURE AT THE CURRENT FRAGMENT'S COORDINATE
    qt_fragColor = texture(qt_texture, qt_coordinate);
}
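
The base class's paint() method ties the shader program, buffers, and video texture together.  It isn't central to the filtering discussion, so here is just a minimal sketch consistent with the initialize() method above:

void LAUVideoGLWidget::paint()
{
    // CLEAR THE WIDGET AND DRAW NOTHING UNTIL WE RECEIVE OUR FIRST VIDEO FRAME
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    if (videoTexture && program.bind()) {
        if (quadVertexBuffer.bind()) {
            if (quadIndexBuffer.bind()) {
                // BIND THE VIDEO TEXTURE TO TEXTURE UNIT 0
                glActiveTexture(GL_TEXTURE0);
                videoTexture->bind();
                program.setUniformValue("qt_texture", 0);

                // DRAW THE TWO TRIANGLES THAT FILL THE WIDGET
                glVertexAttribPointer(program.attributeLocation("qt_vertex"), 4, 
                                      GL_FLOAT, GL_FALSE, 4 * sizeof(float), 0);
                program.enableAttributeArray("qt_vertex");
                glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);

                quadIndexBuffer.release();
            }
            quadVertexBuffer.release();
        }
        program.release();
    }
}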

With our low-level LAUVideoGLWidget class, we can now define an application-specific widget that performs our pixel shuffling.  We start with the following class declaration:

class LAURandomizePixelsGLWidget : public LAUVideoGLWidget
{

public:
    explicit LAURandomizePixelsGLWidget(QWidget *parent = NULL) : LAUVideoGLWidget(parent), 
                                                 mapTexture(NULL), frameBufferObject(NULL)
    {
        ;
    }
    ~LAURandomizePixelsGLWidget();

    void initialize();
    void process();
    void paint();

private:
    QList<int> framesA, framesB;
    QOpenGLTexture *mapTexture;
    QOpenGLFramebufferObject *frameBufferObject;
    QOpenGLShaderProgram programA, programB;

    void buildMappingTexture(int cols, int rows);
};

For this filter, we are going to build a 3D texture that stores a set of pixel-shuffling patterns, with each pattern assigned to one layer along the Z dimension of the texture.  But like the video texture in the low-level class, we need to wait until the first frame of video shows up.  Let's jump ahead and assume we know the incoming video size, so that we can call the following:

void LAURandomizePixelsGLWidget::buildMappingTexture(int cols, int rows)
{
    // CHECK TO SEE IF THERE IS AN EXISTING TEXTURE THAT WE NEED TO DELETE
    if (mapTexture) {
        delete mapTexture;
    }

    // CREATE A NEW 3-D TEXTURE
    mapTexture = new QOpenGLTexture(QOpenGLTexture::Target3D);
    mapTexture->setSize(cols, rows, 16);
    mapTexture->setFormat(QOpenGLTexture::RG32F);
    mapTexture->setWrapMode(QOpenGLTexture::ClampToEdge);
    mapTexture->setMinificationFilter(QOpenGLTexture::Linear);
    mapTexture->setMagnificationFilter(QOpenGLTexture::Linear);
    mapTexture->allocateStorage();

    // CREATE A LIST OF AVAILABLE PIXELS
    QList<QPoint> pointsA, pointsB;
    for (int c = 0; c < cols; c++) {
        for (int r = 0; r < rows; r++) {
            pointsA << QPoint(c, r);
        }
    }

    // NOW SORT THE PIXELS IN RANDOM ORDER FRAME BY FRAME
    int index = 0;
    unsigned short *buffer = (unsigned short *)malloc(mapTexture->width() * mapTexture->height() * 
                             mapTexture->depth() * sizeof(unsigned short) * 2);
    for (int s = 0; s < mapTexture->depth(); s++) {
        framesA << s;
        if (pointsA.count() > pointsB.count()) {
            while (pointsA.isEmpty() == false) {
                // PICK A RANDOM REMAINING PIXEL; MODULO KEEPS US IN RANGE EVEN WHEN rand() RETURNS RAND_MAX
                int pick = rand() % pointsA.count();
                pointsB << pointsA.takeAt(pick);
            }
            for (int n = 0; n < pointsB.count(); n++) {
                buffer[index++] = (unsigned short)pointsB.at(n).x();
                buffer[index++] = (unsigned short)pointsB.at(n).y();
            }
        } else {
            while (pointsB.isEmpty() == false) {
                // PICK A RANDOM REMAINING PIXEL; MODULO KEEPS US IN RANGE EVEN WHEN rand() RETURNS RAND_MAX
                int pick = rand() % pointsB.count();
                pointsA << pointsB.takeAt(pick);
            }
            for (int n = 0; n < pointsA.count(); n++) {
                buffer[index++] = (unsigned short)pointsA.at(n).x();
                buffer[index++] = (unsigned short)pointsA.at(n).y();
            }
        }
    }

    // UPLOAD THE RANDOM INDICES TO THE GPU TEXTURE
    QOpenGLPixelTransferOptions options;
    options.setAlignment(1);
    mapTexture->setData(QOpenGLTexture::RG, QOpenGLTexture::UInt16, (const void *)buffer, &options);

    // DELETE THE TEMPORARY BUFFER
    free(buffer);
}

The first portion of the method deletes any existing map texture that we intend to replace and then creates the new 3D texture.  The second portion shuffles the full list of pixel coordinates into a random order for each layer, ping-ponging between the two point lists, and finally uploads the resulting coordinate pairs to the GPU.

Next, we need two shader programs and a frame buffer object that will hold the pixel-shuffled video frame, which is what we intend to display on screen.  We really only need one shader program to do the shuffling; the second program displays the incoming video side by side with the shuffled frame.  To see how we initialize these new class members, we have the custom-flavored initialize() method:

void LAURandomizePixelsGLWidget::initialize()
{
    LAUVideoGLWidget::initialize();

    // NOW ADD OUR RANDOM PIXEL MAPPING SHADER PROGRAMS
    setlocale(LC_NUMERIC, "C");
    programA.addShaderFromSourceFile(QOpenGLShader::Vertex,   ":/shaders/Shaders/filterRandomMappingA.vert");
    programA.addShaderFromSourceFile(QOpenGLShader::Fragment, ":/shaders/Shaders/filterRandomMappingA.frag");
    programA.link();

    programB.addShaderFromSourceFile(QOpenGLShader::Vertex,   ":/shaders/Shaders/filterRandomMappingB.vert");
    programB.addShaderFromSourceFile(QOpenGLShader::Fragment, ":/shaders/Shaders/filterRandomMappingB.frag");
    programB.link();
    setlocale(LC_ALL, "");
}
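
We won't reproduce the mapping shaders verbatim here, but a minimal sketch of what filterRandomMappingA.frag needs to do with the qt_texture, qt_map, and qt_index uniforms set below in process() is the following; remember that the map coordinates were uploaded as unsigned shorts into a floating-point texture, so the sampled values come back normalized by 65535, and that the mapping stack is 16 layers deep as allocated above:

#version 330 core

uniform sampler2D qt_texture;   // THE INCOMING VIDEO FRAME
uniform sampler3D qt_map;       // THE STACK OF 16 RANDOM PIXEL MAPPINGS
uniform int       qt_index;     // WHICH LAYER OF THE MAPPING STACK TO USE

in vec2 qt_coordinate;          // TEXTURE COORDINATE FROM THE VERTEX SHADER

layout(location = 0, index = 0) out vec4 qt_fragColor;

void main()
{
    // LOOK UP THE SOURCE PIXEL FOR THIS FRAGMENT; THE MAP WAS UPLOADED AS
    // UNSIGNED SHORTS INTO A FLOAT TEXTURE, SO VALUES COME BACK DIVIDED BY 65535
    vec3 mapCoordinate = vec3(qt_coordinate, (float(qt_index) + 0.5) / 16.0);
    vec2 source = texture(qt_map, mapCoordinate).rg * 65535.0;

    // FETCH THE VIDEO PIXEL AT THE SHUFFLED LOCATION
    qt_fragColor = texelFetch(qt_texture, ivec2(source + 0.5), 0);
}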

Again, at this point we don't know the size of the incoming video, so we can't define a frame buffer object to hold its shuffled version.  So we have to wait until the first call to the process() method:

void LAURandomizePixelsGLWidget::process()
{
    if (framesA.isEmpty()) {
        framesA = framesB;
        framesB.clear();
    }

    // SEE IF WE NEED A NEW FBO TO HOLD THE SHUFFLED FRAME
    if (videoTexture) {
        if (frameBufferObject == NULL || frameBufferObject->width() != videoTexture->width() || 
            frameBufferObject->height() != videoTexture->height()) {
            // DELETE ANY EXISTING FRAME BUFFER OBJECT OF THE WRONG SIZE
            if (frameBufferObject) {
                delete frameBufferObject;
            }

            // CREATE A FORMAT OBJECT FOR CREATING THE FRAME BUFFER
            QOpenGLFramebufferObjectFormat frameBufferObjectFormat;
            frameBufferObjectFormat.setInternalTextureFormat(GL_RGBA32F);

            frameBufferObject = new QOpenGLFramebufferObject(videoTexture->width(), 
                                videoTexture->height(), frameBufferObjectFormat);
            frameBufferObject->release();
        }

        // CHECK TO SEE IF WE NEED TO INITIALIZE THE MAPPING TEXTURE
        if (mapTexture == NULL || mapTexture->width() != videoTexture->width() || 
            mapTexture->height() != videoTexture->height()) {
            buildMappingTexture(videoTexture->width(), videoTexture->height());
        }

        // SET THE CLEAR COLOR TO NOT-A-NUMBER VALUES
        glClearColor(NAN, NAN, NAN, NAN);

        // APPLY THE RANDOM MAPPING TO THE INCOMING VIDEO FRAME
        if (frameBufferObject->bind()) {
            if (programA.bind()) {
                // CLEAR THE FRAME BUFFER OBJECT
                glViewport(0, 0, frameBufferObject->width(), frameBufferObject->height());
                glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

                // BIND VBOS FOR DRAWING TRIANGLES ON SCREEN
                if (quadVertexBuffer.bind()) {
                    if (quadIndexBuffer.bind()) {
                        // BIND THE TEXTURE FROM THE ORIGINAL SCAN
                        glActiveTexture(GL_TEXTURE0);
                        videoTexture->bind();
                        programA.setUniformValue("qt_texture", 0);

                        glActiveTexture(GL_TEXTURE1);
                        mapTexture->bind();
                        programA.setUniformValue("qt_map", 1);

                        // SET THE LAYER IN THE RANDOM MAPPING TEXTURE
                        int index = rand() % framesA.count();   // MODULO KEEPS THE INDEX IN RANGE
                        programA.setUniformValue("qt_index", framesA.at(index));
                        framesB << framesA.takeAt(index);

                        // TELL OPENGL PROGRAMMABLE PIPELINE HOW TO LOCATE VERTEX POSITION DATA
                        glVertexAttribPointer(programA.attributeLocation("qt_vertex"), 4, 
                                              GL_FLOAT, GL_FALSE, 4 * sizeof(float), 0);
                        programA.enableAttributeArray("qt_vertex");
                        glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);

                        // RELEASE THE VERTEX AND INDEX BUFFERS, THE GLSL PROGRAM, AND THE FBO
                        quadIndexBuffer.release();
                    }
                    quadVertexBuffer.release();
                }
                programA.release();
            }
            frameBufferObject->release();
        }
        update();
    }
}
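
Finally, the subclass's paint() method puts the shuffled frame on screen next to the raw video.  We won't list it in full, but a minimal sketch using programB, with qt_textureA and qt_textureB as placeholder uniform names, follows:

void LAURandomizePixelsGLWidget::paint()
{
    // CLEAR THE WIDGET BEFORE DRAWING ANYTHING
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    if (videoTexture && frameBufferObject && programB.bind()) {
        if (quadVertexBuffer.bind()) {
            if (quadIndexBuffer.bind()) {
                // BIND THE RAW VIDEO TEXTURE TO TEXTURE UNIT 0
                glActiveTexture(GL_TEXTURE0);
                videoTexture->bind();
                programB.setUniformValue("qt_textureA", 0);

                // BIND THE SHUFFLED FRAME BUFFER OBJECT'S TEXTURE TO TEXTURE UNIT 1
                glActiveTexture(GL_TEXTURE1);
                glBindTexture(GL_TEXTURE_2D, frameBufferObject->texture());
                programB.setUniformValue("qt_textureB", 1);

                // DRAW THE TWO TRIANGLES THAT FILL THE WIDGET
                glVertexAttribPointer(programB.attributeLocation("qt_vertex"), 4, 
                                      GL_FLOAT, GL_FALSE, 4 * sizeof(float), 0);
                programB.enableAttributeArray("qt_vertex");
                glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);

                quadIndexBuffer.release();
            }
            quadVertexBuffer.release();
        }
        programB.release();
    }
}

Here the side-by-side split is left to the fragment shader, which can sample qt_textureA on the left half of the widget and qt_textureB on the right.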