0.8 ffmpeg videoplayer with threaded sound (openAL) in synch

Post those lines of code you feel like sharing or find what you require for your project here; or simply use them as tutorials.
thespecial1
Posts: 135
Joined: Thu Oct 30, 2008 11:56 am
Location: UK
Contact:

0.8 ffmpeg videoplayer with threaded sound (openAL) in synch

Post by thespecial1 »

Thanks to stepan1117 for providing the working Irrlicht/ffmpeg video source, and to Chris Robinson at http://kcat.strangesoft.net/openal.html#tutorials for the OpenAL source code that the class below is built from.

It has taken some time, but I definitely know a lot more about video/audio playback now that I am at the top of the learning curve. Here is an unfinished version of the class; it is missing features like pause/stop/rewind and probably needs some memory clean-up work.

The OpenAL code can be disabled via a #define in the header. If someone adapts this for one of the other sound back-ends, please pass the code back to me so that I can merge it in.

videoplayer.h

Code: Select all

#ifndef __VIDEO_PLAYER_H__ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Original code sources from juliusctw, Hansel, stepan1117
// Heavily Modified/Merged by theSpecial1
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////


// defines ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#define __VIDEO_PLAYER_H__
#define NUM_BUFFERS 3
#define BUFFER_SIZE 19200
#define SOUND_OPENAL
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////


// includes ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include <irrlicht.h>
#include <vector>
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
extern "C" {
	#include <string.h>
	#include <avcodec.h>
	#include <avformat.h>
	#include <swscale.h>
#ifdef SOUND_OPENAL
	#include <signal.h>
	#include <AL/al.h>
	#include <AL/alc.h>
	#include <AL/alut.h>
#endif
} /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////


// namespaces /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
using namespace irr;
using namespace core;
using namespace scene;
using namespace video;
using namespace io;
using namespace gui;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////


// structures /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Opaque handle types: a demuxed media file and one of its elementary streams.
typedef struct MyFile *FilePtr;
typedef struct MyStream *StreamPtr;
// Per-stream state shared by the audio and video paths.
struct MyStream {
	AVCodecContext *CodecCtx;   // decoder context (borrowed from FmtCtx->streams[i]->codec)
	int StreamIdx;              // index of this stream inside the container

	// Raw (still encoded) packet bytes queued by getNextPacket().
	char *Data;
	size_t DataSize;            // bytes currently queued in Data
	size_t DataSizeMax;         // allocated capacity of Data (excl. ffmpeg padding)
	// Decoded audio samples not yet handed out by getAVAudioData().
	char *DecodedData;
	size_t DecodedDataSize;

	FilePtr parent;             // owning file handle (back-pointer)
};

// A demuxed file plus the list of streams the caller has asked handles for.
struct MyFile {
	AVFormatContext *FmtCtx;    // ffmpeg demuxer context
	StreamPtr *Streams;         // grown by realloc in getAVAudio/VideoStream()
	size_t StreamsSize;         // number of entries in Streams
}; ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////


// main class definition //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Plays a video file onto an Irrlicht texture — either drawn directly with
// draw2DImage() or bound to a mesh's material — decoding video via ffmpeg
// and (when SOUND_OPENAL is defined) streaming audio through OpenAL buffers.
class cVideoPlayer {
private: //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // NOTE(review): Timer is declared before IrrVideoDriver, but both
    // constructors list IrrVideoDriver first in their init lists; harmless
    // here (both come straight from parameters) but triggers -Wreorder.
    irr::ITimer *Timer;                       // clock used for frame pacing in refresh()
    irr::video::IVideoDriver *IrrVideoDriver; // driver that owns the textures
    irr::video::IImage *CurrentImage;         // CPU-side image of the latest frame
    irr::video::ITexture *CurrentTexture;     // GPU texture shown on screen/mesh

	FilePtr file;                 // open media file, set by open()
    StreamPtr streamA, streamV;   // audio / video stream handles
	
    unsigned long lastTime;       // timestamp (ms) of the last displayed frame
    double SecondsPerFrame;       // 1/fps, derived from the container's r_frame_rate
	float framerate;              // nominal fps (numerator of r_frame_rate)
	int actualFrame;              // count of frames dumped so far

	int desiredH, desiredW;       // output resolution frames are scaled to
	IMeshSceneNode *outputMesh;   // if non-NULL, frames go to this mesh's texture slot 0
	
	// Decoded-but-not-yet-shown video frames (filled by getNextPacket()).
	std::vector<AVFrame> Frame_Buffer;    
	bool bFrameDisplayed;         // true once the current frame was bound to the mesh
	AVFrame *Frame;               // frame currently being shown
    AVFrame *FrameRGB;            // RGB-converted frame backed by Buffer
    int NumBytes;                 // size of Buffer in bytes
    uint8_t *Buffer;              // pixel storage for FrameRGB
	s32* p;                       // scratch: locked texture pixels
    s32* pimage;                  // scratch: locked image pixels
	
#ifdef SOUND_OPENAL
    ALuint buffers[NUM_BUFFERS];  // rotating queue of audio buffers
    ALuint source;                // the single playback source
    ALint state; 
    ALbyte *data;                 // temp PCM buffer (BUFFER_SIZE bytes)
	ALenum old_format;            // format/rate of the previous file (gapless playback)
	ALenum format;
#endif
    int count;                    // bytes returned by the last getAVAudioData() call
    int i; 	
    int old_rate;        
    int channels;
    int bits;
    int rate;
    int basetime;                 // buffers consumed so far, for the time readout

	bool initAV(void);
	FilePtr openAVFile(const char *fname);
	void closeAVFile(FilePtr file);	
	bool DumpFrame(AVFrame *pFrame, int width, int height, bool needResize);
	
	StreamPtr getAVAudioStream(FilePtr file, int streamnum);
	StreamPtr getAVVideoStream(FilePtr file, int streamnum);
	int getAVAudioInfo(StreamPtr stream, int *rate, int *channels, int *bits);
	bool getNextPacket(FilePtr file, int streamidx);
	int getAVAudioData(StreamPtr stream, void *data, int length);
	AVFrame *getNextFrame(void);		
public: ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	cVideoPlayer(irr::video::IVideoDriver *irrVideoDriver, irr::ITimer *timer, int desiredW, int desiredH);
	cVideoPlayer(irr::video::IVideoDriver *irrVideoDriver, irr::ITimer *timer, int desiredW, int desiredH, IMeshSceneNode *outputMesh);	
	bool open(core::stringc sFileName);
	bool refresh(void);
	void drawVideoTexture(void);
	void changeResolution(int w, int h);
	irr::video::ITexture* getVideoTexture(void);
	float getFrameRate(void);
    ~cVideoPlayer();
}; ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#endif ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

videoplayer.cpp

Code: Select all

// header /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include "videoPlayer.h"
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////


// audio hard quit ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Set asynchronously by the SIGINT handler; polled in refresh() to stop audio.
static volatile int quitnow = 0;

// Minimal async-signal-safe handler: just raise the quit flag.
static void handle_sigint(int signum) {
	(void)signum;
	quitnow = 1;
}


// constructor default ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Constructor for 2D output (refresh()/drawVideoTexture() blits the frame
// with draw2DImage). Does NOT open a file — call open() afterwards.
// desiredW/desiredH: resolution the decoded frames are scaled to.
cVideoPlayer::cVideoPlayer(irr::video::IVideoDriver *irrVideoDriver, irr::ITimer *timer, int desiredW, int desiredH) : IrrVideoDriver(irrVideoDriver), Timer(timer) {    
	// set private vars
    this->desiredH = desiredH;
    this->desiredW = desiredW;
	this->outputMesh = NULL;

	// Fix: the members below were previously never initialized, so the
	// first refresh()/open() calls read indeterminate values.
	CurrentImage = NULL;
	file = NULL;
	streamA = NULL;
	streamV = NULL;
	lastTime = 0;
	SecondsPerFrame = 0.0;
	framerate = 0.0f;
	actualFrame = 0;
	Frame = NULL;
	FrameRGB = NULL;
	NumBytes = 0;
	Buffer = NULL;
	p = NULL;
	pimage = NULL;
	count = 0;
	i = 0;
	old_rate = 0;
	channels = 0;
	bits = 0;
	rate = 0;
	basetime = 0;
#ifdef SOUND_OPENAL
	data = NULL;
	old_format = 0;
	format = 0;
	state = 0;
	source = 0;
#endif

	// initial sound/audio config
	initAV();
}


// constructor alternate mesh output //////////////////////////////////////////////////////////////////////////////////////////////////////
// Constructor for mesh output: each new frame is bound to material slot 0
// of outputMesh instead of being drawn in 2D. Call open() afterwards.
cVideoPlayer::cVideoPlayer(irr::video::IVideoDriver *irrVideoDriver, irr::ITimer *timer, int desiredW, int desiredH, IMeshSceneNode *outputMesh) : IrrVideoDriver(irrVideoDriver), Timer(timer) {    
	// set private vars
    this->desiredH = desiredH;
    this->desiredW = desiredW;
	this->outputMesh = outputMesh;

	// Fix: the members below were previously never initialized, so the
	// first refresh()/open() calls read indeterminate values.
	CurrentImage = NULL;
	file = NULL;
	streamA = NULL;
	streamV = NULL;
	lastTime = 0;
	SecondsPerFrame = 0.0;
	framerate = 0.0f;
	actualFrame = 0;
	Frame = NULL;
	FrameRGB = NULL;
	NumBytes = 0;
	Buffer = NULL;
	p = NULL;
	pimage = NULL;
	count = 0;
	i = 0;
	old_rate = 0;
	channels = 0;
	bits = 0;
	rate = 0;
	basetime = 0;
#ifdef SOUND_OPENAL
	data = NULL;
	old_format = 0;
	format = 0;
	state = 0;
	source = 0;
#endif

	// initial sound/audio config
	initAV();
}


// initialise audio/video /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// One-time audio/video setup: configures Irrlicht texture creation flags,
// registers ffmpeg codecs and (under SOUND_OPENAL) initialises ALUT, the
// buffer queue and the playback source. Returns false on any audio failure;
// each failure path unwinds exactly what was created before it.
bool cVideoPlayer::initAV(void) {
	// initial video flags
	IrrVideoDriver->setTextureCreationFlag(ETCF_CREATE_MIP_MAPS, false);
    IrrVideoDriver->setTextureCreationFlag(ETCF_ALWAYS_32_BIT, true);
	CurrentTexture = NULL;
	bFrameDisplayed = true;
    
	// Register all formats and codecs
    av_register_all();

#ifdef SOUND_OPENAL
	// signal handler
	// NOTE(review): this installs a process-wide SIGINT handler from a class
	// initialiser — confirm the host application expects that side effect.
	if (signal(SIGINT, handle_sigint) == SIG_ERR) {
        fprintf(stderr, "Unable to set handler for SIGINT!\n");
        return false;
    }

	// audio temp buffer, refilled each refresh() from getAVAudioData()
    data = (ALbyte *)malloc(BUFFER_SIZE);
    if (!data) {
        fprintf(stderr, "Out of memory allocating temp buffer!\n");
        return false;
    }

    // Initialize ALUT with default settings 
    if (alutInit(NULL, NULL) == AL_FALSE) {
        free(data);
        fprintf(stderr, "Could not initialize ALUT (%s)!\n", alutGetErrorString(alutGetError()));
        return false;
    }

    // Generate the buffers and source 
    alGenBuffers(NUM_BUFFERS, buffers);
    if (alGetError() != AL_NO_ERROR) {
        alutExit();
        free(data);
        fprintf(stderr, "Could not create buffers...\n");
        return false;
    }
    alGenSources(1, &source);
    if (alGetError() != AL_NO_ERROR) {
        alDeleteBuffers(NUM_BUFFERS, buffers);
        alutExit();
        free(data);
        fprintf(stderr, "Could not create source...\n");
        return false;
    }

    // Set parameters so mono sources won't distance attenuate 
    alSourcei(source, AL_SOURCE_RELATIVE, AL_TRUE);
    alSourcei(source, AL_ROLLOFF_FACTOR, 0);
    if (alGetError() != AL_NO_ERROR) {
        alDeleteSources(1, &source);
        alDeleteBuffers(NUM_BUFFERS, buffers);
        alutExit();
        free(data);
        fprintf(stderr, "Could not set source parameters...\n");
        return false;
    }
#endif

	return true;
} /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////


// get the next frame from the buffer /////////////////////////////////////////////////////////////////////////////////////////////////////
// Pop the oldest decoded video frame. If the buffer is empty, demux/decode
// packets until a frame arrives or the file ends. Returns a newly allocated
// AVFrame the caller must av_free(), or NULL at end of stream.
AVFrame *cVideoPlayer::getNextFrame(void) {
	// get more frames if buffer empty
	while (Frame_Buffer.size() == 0) {
		if (!getNextPacket(streamA->parent, streamA->StreamIdx)) {
			break;
		}
	}
	
	// Return the OLDEST buffered frame so playback stays in order.
	// Fix: the original copied Frame_Buffer.back() (the NEWEST frame) but
	// erased Frame_Buffer.begin() (the oldest), silently discarding frames
	// and displaying them out of order whenever more than one was buffered.
	if (Frame_Buffer.size() > 0) { // we have frames
		AVFrame *t = avcodec_alloc_frame();
		*t = Frame_Buffer.front();
		Frame_Buffer.erase(Frame_Buffer.begin());
		return t;
	} else {
		return NULL;
	}	
}


// refresh audio/video ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Pump one iteration of the player: keep the OpenAL buffer queue fed, and
// once SecondsPerFrame wall-clock time has elapsed, decode the next video
// frame, colour-convert it to RGB32 at desiredW x desiredH and hand it to
// DumpFrame(). Returns false when audio runs out, an AL/sws call fails, or
// no further frame is available; true while playback should continue.
bool cVideoPlayer::refresh(void) {
	// sws context and last-known output size persist across calls so the
	// converter is only (re)built on first use or on a resolution change
	static struct SwsContext *img_convert_ctx;
	static int currentX = 0;
	static int currentY = 0;
	bool needResize = false;

#ifdef SOUND_OPENAL
	// ensure we aren't out of sound
	if (count > 0 && !quitnow) {
		// Check if any buffers on the source are finished playing 
		ALint processed = 0;
		alGetSourcei(source, AL_BUFFERS_PROCESSED, &processed);
		if (processed == 0) {
			// All buffers are full. Check if the source is still playing.
			// If not, restart it, otherwise, print the time and rest 
			alGetSourcei(source, AL_SOURCE_STATE, &state);
			if (alGetError() != AL_NO_ERROR) {
				fprintf(stderr, "Error checking source state...\n");
				return false;
			}
			if (state != AL_PLAYING) {
				alSourcePlay(source);
				if (alGetError() != AL_NO_ERROR) {
					closeAVFile(file);
					fprintf(stderr, "Error restarting playback...\n");
					return false;
				}
			} else {
				ALint offset;
				alGetSourcei(source, AL_SAMPLE_OFFSET, &offset);
				// Add the base time to the offset. Each count of basetime
				// represents one buffer, which is BUFFER_SIZE in bytes 
				// (BUFFER_SIZE/channels*8/bits converts bytes -> samples)
				offset += basetime * (BUFFER_SIZE/channels*8/bits);
				//fprintf(stderr, "\rTime: %d:%05.02f", offset/rate/60, (offset%(rate*60))/(float)rate);
				//alutSleep((ALfloat)0.01);
			}

			// all done for this iteration — skip straight to the video step
			//return true;
			goto allaudiodone;
		}

		// Read the next chunk of data and refill the oldest buffer 
		count = getAVAudioData(streamA, data, BUFFER_SIZE);
		if (count > 0) {
			ALuint buf = 0;
			alSourceUnqueueBuffers(source, 1, &buf);
			if (buf != 0) {
				alBufferData(buf, format, data, count, rate);
				alSourceQueueBuffers(source, 1, &buf);
				// For each successfully unqueued buffer, increment the
				// base time. The retrieved sample offset for timing is
				// relative to the start of the buffer queue, so for every
				// buffer that gets unqueued we need to increment the base
				// time to keep the reported time accurate and not fall backwards 
				basetime++;
			}
			if (alGetError() != AL_NO_ERROR) {
				fprintf(stderr, "Error buffering data...\n");
				return false;
			}
		}
	} else { // out of audio
		return false;
	}	
#endif

// NOTE(review): when SOUND_OPENAL is undefined this label is never jumped
// to (unused-label warning only); the video step below still runs.
allaudiodone:		
	// process the next video frame from the buffer		
	if (Timer->getRealTime() - lastTime > (SecondsPerFrame*1000)) {
		lastTime = Timer->getRealTime();
		Frame = getNextFrame();
		if (Frame != NULL) {
			if (img_convert_ctx == NULL) {
				// first frame: build the scaler for source size -> desired size
				currentX = desiredW;
				currentY = desiredH;

				int w = streamV->CodecCtx->width;
                int h = streamV->CodecCtx->height;

				img_convert_ctx = sws_getContext(w, h, streamV->CodecCtx->pix_fmt, desiredW, desiredH, PIX_FMT_RGB32, 
					SWS_FAST_BILINEAR | SWS_CPU_CAPS_MMX2, NULL, NULL, NULL);
                if (img_convert_ctx == NULL) {
					fprintf(stderr, "Cannot initialize the conversion context!\n");
                    return false;
                }
            } else if (currentX != desiredW || currentY != desiredH) {
				// output size changed (changeResolution): rebuild the scaler
				// and tell DumpFrame to recreate its texture
				needResize = true;
				currentX = desiredW;
				currentY = desiredH;

				int w = streamV->CodecCtx->width;
				int h = streamV->CodecCtx->height;

				sws_freeContext(img_convert_ctx);
				img_convert_ctx = NULL;

				img_convert_ctx = sws_getContext(w, h, streamV->CodecCtx->pix_fmt, desiredW, desiredH, PIX_FMT_RGB32, 
					SWS_FAST_BILINEAR | SWS_CPU_CAPS_MMX2, NULL, NULL, NULL);
				if (img_convert_ctx == NULL) {
					fprintf(stderr, "Cannot re-initialize the conversion context!\n");
					return false;
				}
            }

			sws_scale(img_convert_ctx, Frame->data, Frame->linesize, 0, streamV->CodecCtx->height, FrameRGB->data, FrameRGB->linesize);

            //printf("Dumping Frame: %d  ::  FrameRate: %f\n", actualFrame, framerate);

            // Dump the frame
            DumpFrame(FrameRGB, desiredW, desiredH, needResize);

			// increase frame/time counts
            actualFrame++;
			
			// flag drawVideoTexture() that a fresh frame needs binding
			bFrameDisplayed = false;
		} else {
			return false;
		}
	} 

	// success, more audio/video to follow
	return true;
} /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////


// draw the current texture onscreen //////////////////////////////////////////////////////////////////////////////////////////////////////
// Present the current frame: either blit it as a 2D image at the origin,
// or (mesh mode) bind it to the mesh's material slot 0 — but only once per
// decoded frame, tracked via bFrameDisplayed.
void cVideoPlayer::drawVideoTexture(void) {
	// Nothing to show until the first frame has been dumped to a texture.
	if (CurrentTexture == NULL)
		return;

	if (outputMesh == NULL) {
		// 2D mode: draw the texture stretched over the desired rectangle.
		IrrVideoDriver->draw2DImage(CurrentTexture, irr::core::position2d<irr::s32>(0,0),
			irr::core::rect<irr::s32>(0,0,desiredW,desiredH), 0, irr::video::SColor(255,255,255,255), false);
	} else if (!bFrameDisplayed) {
		// Mesh mode: re-bind only when refresh() produced a new frame.
		outputMesh->setMaterialTexture(0, CurrentTexture);
		bFrameDisplayed = true;
	}
}


// refresh audio/video ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Open a media file and prepare it for playback: demux it, grab the first
// video and audio streams, pick the matching OpenAL format, pre-fill the
// buffer queue and start the source. Returns false (with the file closed)
// on any failure.
bool cVideoPlayer::open(core::stringc sFileName) {
	// reset initial vars
#ifdef SOUND_OPENAL
	// Fix: 'format' is only declared when SOUND_OPENAL is defined (see the
	// header), so referencing it unconditionally broke the non-OpenAL build
	// the header's #define explicitly supports.
	format = 0;
#endif
	basetime = 0;

    // check the file opens up
    file = openAVFile(sFileName.c_str());
	if (!file) {
		fprintf(stderr, "Could not open %s\n", sFileName.c_str());
        return false;
	}	

	// create the stream objects
	streamV = getAVVideoStream(file, 0);
    if (!streamV) {
        closeAVFile(file);
        fprintf(stderr, "Could not open video in %s\n", sFileName.c_str());
        return false;
    }
    streamA = getAVAudioStream(file, 0);
    if (!streamA) {
        closeAVFile(file);
        fprintf(stderr, "Could not open audio in %s\n", sFileName.c_str());
        return false;
    }
    
	// Get the stream format, and figure out the OpenAL format. We use the
    // AL_EXT_MCFORMATS extension to provide output of 4 and 5.1 audio streams 
    if (getAVAudioInfo(streamA, &rate, &channels, &bits) != 0) {
        closeAVFile(file);
        fprintf(stderr, "Error getting audio info for %s\n", sFileName.c_str());
        return false;
    }

#ifdef SOUND_OPENAL
	// determine the sound formats
    if (bits == 8) {
        if (channels == 1) format = AL_FORMAT_MONO8;
        if (channels == 2) format = AL_FORMAT_STEREO8;
        if (alIsExtensionPresent("AL_EXT_MCFORMATS")) {
            if (channels == 4) format = alGetEnumValue("AL_FORMAT_QUAD8");
            if (channels == 6) format = alGetEnumValue("AL_FORMAT_51CHN8");
        }
    }
    if (bits == 16) {
        if (channels == 1) format = AL_FORMAT_MONO16;
        if (channels == 2) format = AL_FORMAT_STEREO16;
        if (alIsExtensionPresent("AL_EXT_MCFORMATS")) {
            if (channels == 4) format = alGetEnumValue("AL_FORMAT_QUAD16");
            if (channels == 6) format = alGetEnumValue("AL_FORMAT_51CHN16");
        }
    }
    if (format == 0) {
        closeAVFile(file);
        fprintf(stderr, "Unhandled format (%d channels, %d bits) for %s", channels, bits, sFileName.c_str());
        return false;
    }

    // If the format of the last file matches the current one, we can skip
    // the initial load and let the processing loop take over (gap-less playback!) 
    count = 1;
    if (format != old_format || rate != old_rate) {
        int j;

        old_format = format;
        old_rate = rate;

        // Wait for the last song to finish playing 
        do {
            alutSleep((ALfloat)0.01);
            alGetSourcei(source, AL_SOURCE_STATE, &state);
        } while(alGetError() == AL_NO_ERROR && state == AL_PLAYING);

        // Rewind the source position and clear the buffer queue 
        alSourceRewind(source);
        alSourcei(source, AL_BUFFER, 0);

        // Fill and queue the buffers 
        for(j = 0;j < NUM_BUFFERS;j++) {
            // Make sure we get some data to give to the buffer 
            count = getAVAudioData(streamA, data, BUFFER_SIZE);
            if(count <= 0) return false;

            // Buffer the data with OpenAL and queue the buffer onto the source 
            alBufferData(buffers[j], format, data, count, rate);
            alSourceQueueBuffers(source, 1, &buffers[j]);
        }
        if (alGetError() != AL_NO_ERROR) {
            closeAVFile(file);
            fprintf(stderr, "Error buffering initial data...\n");
            return false;
        }

        // Now start playback! 
        alSourcePlay(source);
        if (alGetError() != AL_NO_ERROR) {
            closeAVFile(file);
            fprintf(stderr, "Error starting playback...\n");
            return false;
        }
    } else {
        // When skipping the initial load of a file (because the previous
        // one is using the same exact format), set the base time to the
        // negative of the queued buffers. This is so the timing will be
        // from the beginning of this file, which won't start playing until
        // the next buffer to get queued does */
        basetime = -NUM_BUFFERS;
    }
#endif

    //fprintf(stderr, "\rPlaying %s (%d-bit, %d channels, %dhz)\n", sFileName.c_str(), bits, channels, rate);
	return true;
}


// dump the frame to texture //////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Copy the converted RGB frame into CurrentTexture so it can be drawn.
// Lazily creates the backing IImage/ITexture on first use, and recreates
// both when the output resolution changed (needResize).
bool cVideoPlayer::DumpFrame(AVFrame *pFrame, int width, int height, bool needResize) {
    static char first_time = 1;

    if (first_time) {
		CurrentImage = IrrVideoDriver->createImageFromData(irr::video::ECF_A8R8G8B8,
                       irr::core::dimension2d<irr::s32>(width, height),
                       pFrame->data[0],
                       true);
        first_time = 0;
        CurrentTexture = IrrVideoDriver->addTexture("movie", CurrentImage);
    }

    if (needResize) {
       IrrVideoDriver->removeTexture(CurrentTexture);
       // Fix: release the previous IImage before creating its replacement —
       // the original leaked one image object per resolution change.
       if (CurrentImage != NULL) CurrentImage->drop();
       CurrentImage = IrrVideoDriver->createImageFromData(irr::video::ECF_A8R8G8B8,
                              irr::core::dimension2d<irr::s32>(width, height),
                              pFrame->data[0],
                              true);
        CurrentTexture = IrrVideoDriver->addTexture("movie", CurrentImage);
    }

    // Blit the image pixels into the locked texture. Both are A8R8G8B8, so
    // one flat 32-bit copy is equivalent to the original per-pixel loop
    // (NOTE(review): this assumes the locked texture has no row padding,
    // exactly as the original loop did — confirm for the target driver).
    p = (s32*)CurrentTexture->lock();
    pimage = (s32*)CurrentImage->lock();

    memcpy(p, pimage, (size_t)width * (size_t)height * sizeof(s32));

    // unlock the texture and the image
    CurrentTexture->unlock();
    CurrentImage->unlock();

	return true;
}


// internal open file /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Open a media file with ffmpeg and probe its streams. Returns a new
// FilePtr on success, NULL on any failure (nothing leaks on the way out).
FilePtr cVideoPlayer::openAVFile(const char *fname) {
    // One-time global ffmpeg setup; silence everything below error level.
    static int initialized = 0;
    if (!initialized) {
        av_register_all();
        av_log_set_level(AV_LOG_ERROR);
    }
    initialized = 1;

    FilePtr handle = (FilePtr)calloc(1, sizeof(*handle));
    if (!handle)
        return NULL;

    if (av_open_input_file(&handle->FmtCtx, fname, NULL, 0, NULL) != 0) {
        free(handle);
        return NULL;
    }

    // Stream info is not always present in the container header (e.g. raw
    // system MPEG streams), so probe the streams explicitly.
    if (av_find_stream_info(handle->FmtCtx) < 0) {
        av_close_input_file(handle->FmtCtx);
        free(handle);
        return NULL;
    }
    return handle;
}


// internal close file ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Tear down a file handle: close every stream codec, free the per-stream
// packet/decode buffers, then close the demuxer itself. Safe on NULL.
void cVideoPlayer::closeAVFile(FilePtr file) {
    if (!file)
        return;

    // Release each stream handle created by getAVAudio/VideoStream().
    for (size_t idx = 0; idx < file->StreamsSize; ++idx) {
        StreamPtr s = file->Streams[idx];
        avcodec_close(s->CodecCtx);
        free(s->Data);
        free(s->DecodedData);
        free(s);
    }
    free(file->Streams);

    av_close_input_file(file->FmtCtx);
    free(file);
}


// internal find the relevent streams /////////////////////////////////////////////////////////////////////////////////////////////////////
// Return a handle to the streamnum-th AUDIO stream of the file, creating
// (and registering in file->Streams) a decoder-backed handle on first use.
// Returns NULL if the stream doesn't exist or any allocation/codec step fails.
StreamPtr cVideoPlayer::getAVAudioStream(FilePtr file, int streamnum) {
    unsigned int i;
    if (!file) return NULL;
    for(i = 0;i < file->FmtCtx->nb_streams;i++) {
        // skip everything that is not an audio stream
        if (file->FmtCtx->streams[i]->codec->codec_type != CODEC_TYPE_AUDIO) continue;

        if (streamnum == 0) {
            StreamPtr stream;
            AVCodec *codec;
            void *temp;
            size_t j;

            // Found the requested stream. Check if a handle to this stream
            // already exists and return it if it does 
            for(j = 0;j < file->StreamsSize;j++) {
                if (file->Streams[j]->StreamIdx == (int)i) return file->Streams[j];
            }

            // Doesn't yet exist. Now allocate a new stream object and fill in its info 
            stream = (StreamPtr)calloc(1, sizeof(*stream));
            if (!stream) return NULL;
            stream->parent = file;
            stream->CodecCtx = file->FmtCtx->streams[i]->codec;
            stream->StreamIdx = i;

            // Try to find the codec for the given codec ID, and open it 
            codec = avcodec_find_decoder(stream->CodecCtx->codec_id);
            if (!codec || avcodec_open(stream->CodecCtx, codec) < 0) {
                free(stream);
                return NULL;
            }

            // Allocate space for the decoded data to be stored in before it gets passed to the app 
            stream->DecodedData = (char *)malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
            if (!stream->DecodedData) {
                avcodec_close(stream->CodecCtx);
                free(stream);
                return NULL;
            }

            // Append the new stream object to the stream list. The original
            // pointer will remain valid if realloc fails, so we need to use
            // another pointer to watch for errors and not leak memory 
            temp = realloc(file->Streams, (file->StreamsSize+1) * sizeof(*file->Streams));
            if (!temp) {
                avcodec_close(stream->CodecCtx);
                free(stream->DecodedData);
                free(stream);
                return NULL;
            }
            file->Streams = (StreamPtr*)temp;
            file->Streams[file->StreamsSize++] = stream;
            return stream;
        }
        // not the requested one yet: count down through the audio streams
        streamnum--;
    }
    return NULL;
}
// Return a handle to the streamnum-th VIDEO stream of the file, creating a
// decoder-backed handle on first use, computing the frame rate, and
// allocating the RGB conversion frame/buffer. Returns NULL on any failure.
StreamPtr cVideoPlayer::getAVVideoStream(FilePtr file, int streamnum) {
    unsigned int i;
    if (!file) return NULL;
    for(i = 0;i < file->FmtCtx->nb_streams;i++) {
        // skip everything that is not a video stream
        if (file->FmtCtx->streams[i]->codec->codec_type != CODEC_TYPE_VIDEO) continue;

        if (streamnum == 0) {
            StreamPtr stream;
            AVCodec *codec;
            void *temp;
            size_t j;

            // Found the requested stream. Check if a handle to this stream
            // already exists and return it if it does 
            for(j = 0;j < file->StreamsSize;j++) {
                if (file->Streams[j]->StreamIdx == (int)i) return file->Streams[j];
            }

            // Doesn't yet exist. Now allocate a new stream object and fill in its info 
            stream = (StreamPtr)calloc(1, sizeof(*stream));
            if (!stream) return NULL;
            stream->parent = file;
            stream->CodecCtx = file->FmtCtx->streams[i]->codec;
            stream->StreamIdx = i;

            // Try to find the codec for the given codec ID, and open it.
            // Fix: the original read codec->capabilities BEFORE checking the
            // NULL result of avcodec_find_decoder(), dereferencing a null
            // pointer whenever no decoder exists for the stream's codec.
            codec = avcodec_find_decoder(stream->CodecCtx->codec_id);
            if (!codec) {
                free(stream);
                return NULL;
            }
            // Truncated-bitstream support must be flagged before opening.
            if (codec->capabilities & CODEC_CAP_TRUNCATED) stream->CodecCtx->flags|=CODEC_FLAG_TRUNCATED;
            if (avcodec_open(stream->CodecCtx, codec) < 0) {
                free(stream);
                return NULL;
            }

			// get the movie framerate
			framerate = (float)file->FmtCtx->streams[i]->r_frame_rate.num;
			SecondsPerFrame = (double)file->FmtCtx->streams[i]->r_frame_rate.den / file->FmtCtx->streams[i]->r_frame_rate.num;

			// setup temp allocations			
			Frame = avcodec_alloc_frame();
			FrameRGB=avcodec_alloc_frame();
			if (FrameRGB == NULL) return NULL;
			NumBytes = avpicture_get_size(PIX_FMT_RGB32, desiredW, desiredH);
			Buffer = new uint8_t[NumBytes];
			avpicture_fill((AVPicture *)FrameRGB, Buffer, PIX_FMT_RGB32, desiredW, desiredH);

            // Append the new stream object to the stream list. The original
            // pointer will remain valid if realloc fails, so we need to use
            // another pointer to watch for errors and not leak memory 
            temp = realloc(file->Streams, (file->StreamsSize+1) * sizeof(*file->Streams));
            if (!temp) {
                avcodec_close(stream->CodecCtx);
                free(stream->DecodedData); // NULL for video streams; free(NULL) is a no-op
                free(stream);
                return NULL;
            }
            file->Streams = (StreamPtr*)temp;
            file->Streams[file->StreamsSize++] = stream;
            return stream;
        }
        // not the requested one yet: count down through the video streams
        streamnum--;
    }
    return NULL;
} /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////


// internal grab audio stream bits etc ////////////////////////////////////////////////////////////////////////////////////////////////////
// Report the audio stream's sample rate, channel count and sample depth.
// Returns 0 on success, 1 for a NULL handle or a non-audio stream.
int cVideoPlayer::getAVAudioInfo(StreamPtr stream, int *rate, int *channels, int *bits) {
    // Reject NULL handles and anything that is not an audio stream.
    if (stream == NULL || stream->CodecCtx->codec_type != CODEC_TYPE_AUDIO)
        return 1;

    // Each out-parameter is optional; fill only what the caller asked for.
    if (rate != NULL)
        *rate = stream->CodecCtx->sample_rate;
    if (channels != NULL)
        *channels = stream->CodecCtx->channels;
    if (bits != NULL)
        *bits = 16; // decoded output here is always 16-bit samples

    return 0;
}


// internal get next packet ///////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Demux packets until one for stream `streamidx` (the audio stream) has
// been queued into that stream's Data buffer; video packets encountered on
// the way are decoded immediately into Frame_Buffer. Returns true when an
// audio packet was queued, false at end of file.
bool cVideoPlayer::getNextPacket(FilePtr file, int streamidx) {
    static AVPacket packet;
    static int bytesRemaining=0;
    static uint8_t  *rawData;
    static int bytesDecoded;
    static int frameFinished;
	AVFrame *pFrame;
	pFrame = avcodec_alloc_frame();

	// read frames until we have an audio packet to return
    while(av_read_frame(file->FmtCtx, &packet) >= 0) {
        StreamPtr *iter = file->Streams;
        size_t i;

        // Check each stream the user has a handle for, looking for the one this packet belongs to 
        for(i = 0;i < file->StreamsSize;i++,iter++) {
            if ((*iter)->StreamIdx == packet.stream_index) {
				if (packet.stream_index == streamidx) {  // audio packets				
					size_t idx = (*iter)->DataSize;

					// Found the stream. Grow the input data buffer as needed to
					// hold the new packet's data. Additionally, some ffmpeg codecs
					// need some padding so they don't overread the allocated buffer
					if (idx+packet.size > (*iter)->DataSizeMax) {
						void *temp = realloc((*iter)->Data, idx+packet.size + FF_INPUT_BUFFER_PADDING_SIZE);
						if (!temp) break;
						(*iter)->Data = (char *)temp;
						(*iter)->DataSizeMax = idx+packet.size;
					}

					// Copy the packet and free it 
					memcpy(&(*iter)->Data[idx], packet.data, packet.size);
					(*iter)->DataSize += packet.size;

					// Return if this stream is what we needed a packet for 
					if (streamidx == (*iter)->StreamIdx) {
						av_free_packet(&packet);
						av_free(pFrame); // fix: the scratch frame leaked on this path
						return true;
					}
					break;
				} else {  // continue decoding video frames to the buffer
					bytesRemaining += packet.size;
					rawData = packet.data;
					
					// Work on the current packet until we have decoded all of it
					while (bytesRemaining > 0) {
						// Decode the next chunk of data
						bytesDecoded = avcodec_decode_video((*iter)->CodecCtx, pFrame, &frameFinished, rawData, bytesRemaining);

						// Was there an error?
						if (bytesDecoded < 0) {
							fprintf(stderr, "Error while decoding frame\n");
							//return false;
						}

						bytesRemaining -= bytesDecoded;
						rawData += bytesDecoded;

						// Did we finish the current frame? Then we can return
						if (frameFinished) { // add the current frame to the buffer
							Frame_Buffer.push_back(*pFrame);
							av_free(pFrame);
							pFrame = avcodec_alloc_frame();
							frameFinished = false;
						}
					}
				}
            }
        }

        // Free the packet and look for another.
        // Fix: the original also av_free'd pFrame here on EVERY loop
        // iteration without reallocating it, so the next iteration decoded
        // into freed memory (use-after-free) and frameFinished paths then
        // freed it again (double free). The scratch frame now lives for the
        // whole call and is released exactly once below.
        av_free_packet(&packet);
    }

	av_free(pFrame);
	return false;
} /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////


// internal get audio data ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Decode up to 'length' bytes of raw PCM from the given audio stream into
// 'data', demuxing and decoding further packets from the file on demand.
// Returns the number of bytes actually written: 0 for a NULL/non-audio
// stream, and possibly less than 'length' at end of input.
int cVideoPlayer::getAVAudioData(StreamPtr stream, void *data, int length) {
    int dec = 0;

    // Only audio streams can be serviced here
    if (!stream || stream->CodecCtx->codec_type != CODEC_TYPE_AUDIO) return 0;
    while(dec < length) {
        // If there's any pending decoded data, deal with it first 
        if (stream->DecodedDataSize > 0) {
            // Get the amount of bytes remaining to be written, 
			// and clamp to the amount of decoded data we have 
            size_t rem = length-dec;
            if (rem > stream->DecodedDataSize) rem = stream->DecodedDataSize;

            // Copy the data to the app's buffer and increment 
            memcpy(data, stream->DecodedData, rem);
            data = (char*)data + rem;
            dec += rem;

            // If there's any decoded data left, move it to the front of the
            // buffer for next time 
            if (rem < stream->DecodedDataSize)
                memmove(stream->DecodedData, &stream->DecodedData[rem], stream->DecodedDataSize - rem);
            stream->DecodedDataSize -= rem;
        }

        // Check if we need to get more decoded data 
        if (stream->DecodedDataSize == 0) {
            size_t insize;
            int size;
            int len;

            insize = stream->DataSize;
            if (insize == 0) {
                getNextPacket(stream->parent, stream->StreamIdx);
                
				// If there's no more input data, break and return what we have 
                if (insize == stream->DataSize) break;
                insize = stream->DataSize;
                // Zero the padding region ffmpeg requires past the end of the
                // input buffer (Data is (re)allocated with extra padding room
                // in getNextPacket)
                memset(&stream->Data[insize], 0, FF_INPUT_BUFFER_PADDING_SIZE);
            }

            // Clear the input padding bits 
            // Decode some data, and check for errors 
            // NOTE(review): 'size' tells avcodec_decode_audio2 the capacity of
            // DecodedData — this assumes DecodedData holds at least
            // AVCODEC_MAX_AUDIO_FRAME_SIZE bytes; confirm at its alloc site.
            size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
            while((len=avcodec_decode_audio2(stream->CodecCtx, (int16_t*)stream->DecodedData, 
					&size, (uint8_t*)stream->Data, insize)) == 0) {
                
				if (size > 0) break;
                // Decoder consumed nothing and produced nothing: feed it more
                getNextPacket(stream->parent, stream->StreamIdx);
                if (insize == stream->DataSize) break;
                insize = stream->DataSize;
                memset(&stream->Data[insize], 0, FF_INPUT_BUFFER_PADDING_SIZE);
            }

            // Decode error: give up and return whatever we gathered so far
            if (len < 0) break;

            if (len > 0) {
                // If any input data is left, move it to the start of the
                // buffer, and decrease the buffer size 
                size_t rem = insize-len;
                if (rem) memmove(stream->Data, &stream->Data[len], rem);
                stream->DataSize = rem;
            }
            // Set the output buffer size 
            stream->DecodedDataSize = size;
        }
    }

    // Return the number of bytes we were able to get 
    return dec;
} /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////


// switch res /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// switch res /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Change the output (scaled) resolution; re-allocates the RGB buffer that
// FrameRGB's planes point into. No-op when the size is unchanged.
void cVideoPlayer::changeResolution(int w, int h) {
	// Nothing to do when the requested size already matches the target size.
	if (w == desiredW && h == desiredH)
		return;

	std::cout << "Changing resolution from ["<< desiredW << "x" << desiredH << "] to [" << w << "x" << h << "]" << std::endl;

	desiredW = w;
	desiredH = h;

	// Drop the old conversion buffer and size a fresh one for the new target.
	delete [] Buffer;
	//av_free((AVPicture *)FrameRGB);
	NumBytes = avpicture_get_size(PIX_FMT_RGB32, desiredW, desiredH);
	Buffer = new uint8_t[NumBytes];

	// Re-point FrameRGB's image planes at the freshly allocated buffer.
	avpicture_fill((AVPicture *)FrameRGB, Buffer, PIX_FMT_RGB32, desiredW, desiredH);
} /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////


// write back the current texture(or frame if u prefer) ///////////////////////////////////////////////////////////////////////////////////
// write back the current texture(or frame if u prefer) ///////////////////////////////////////////////////////////////////////////////////
// Accessor for the texture holding the most recently decoded frame (may be
// NULL before the first frame has been dumped).
irr::video::ITexture* cVideoPlayer::getVideoTexture(void) {
	return this->CurrentTexture;
} /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////


// return current frame rate //////////////////////////////////////////////////////////////////////////////////////////////////////////////
float cVideoPlayer::getFrameRate(void) {
    return framerate;
} /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////


// destruct ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// destruct ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Tear down all decoding state; with SOUND_OPENAL enabled, also drain the
// OpenAL queue and shut the audio stack down.
cVideoPlayer::~cVideoPlayer() {	
	// Free the RGB image buffer
	if (Buffer != NULL)
		delete [] Buffer;

	if (FrameRGB != NULL)
		av_free(FrameRGB);

	// Free the YUV frame
	if (Frame != NULL)
		av_free(Frame);

	// Close the codec. streamV itself must be NULL-checked as well: it stays
	// NULL when no video stream was ever opened (audio-only file or a failed
	// open), and dereferencing it unconditionally crashed on teardown.
	if (streamV != NULL && streamV->CodecCtx != NULL)
		avcodec_close(streamV->CodecCtx);

	// close the file
	closeAVFile(file);
    fprintf(stderr, "\nDone.\n");

#ifdef SOUND_OPENAL
	// All data has been streamed in. Wait until the source stops playing it 
    do {
        alutSleep((ALfloat)0.01);
        alGetSourcei(source, AL_SOURCE_STATE, &state);
    } while(alGetError() == AL_NO_ERROR && state == AL_PLAYING);

    // All files done. Delete the source and buffers, and close OpenAL 
    alDeleteSources(1, &source);
    alDeleteBuffers(NUM_BUFFERS, buffers);
    alutExit();
    free(data);  // free(NULL) is a safe no-op
#endif
} /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
I could really use some suggestions from one of the C++ masters on here on how to make it more efficient, if they are willing.
Last edited by thespecial1 on Mon Mar 29, 2010 9:20 am, edited 8 times in total.
jpoag
Posts: 25
Joined: Mon Aug 10, 2009 1:00 am

Post by jpoag »

Hey,

I've recently implemented FFmpeg in the framework that I use, and I've noticed that the code you've posted is missing a separate decoding thread. In fact, it's missing separate threads altogether.

I've used this tutorial:
http://www.dranger.com/ffmpeg/

It uses SDL and SDL_threads, but it was an easy jump to boost threads. Also, it has the FastForward and Rewind/Puase stuff. IIRC The only thing you really need to keep in the main thread is the swscale to the output texture and probably filling the audio buffer.
-James
FreakNigh
Posts: 122
Joined: Thu Oct 19, 2006 7:31 am
Location: Orlando FL, USA
Contact:

Post by FreakNigh »

Man irrlicht BADLY! needs a video player
Image

CvIrrCamController - 3D head tracking lib to create window effect with webcam
IrrAR - Attach Irrlicht nodes to real life markers
http://www.nighsoft.com/
thespecial1
Posts: 135
Joined: Thu Oct 30, 2008 11:56 am
Location: UK
Contact:

Post by thespecial1 »

i used the same sdl example for reference, multiple threads was the next step, need it to keep playing smoothly whilst the irrlicht engine does other tasks. i do like the look of boost.
christianclavet
Posts: 1638
Joined: Mon Apr 30, 2007 3:24 am
Location: Montreal, CANADA
Contact:

Post by christianclavet »

Hi, can you add links to the dependencies?
(OpenAL SDK and FFmpeg library)

This really looks like something we could try to get into Irrlicht (I mean a working source with all the dependencies for Irrlicht, so everyone could compile and see it right out of the box.)

Thanks!
ellemayo
Posts: 15
Joined: Tue Oct 06, 2009 6:24 pm

Post by ellemayo »

what versions of the libraries did you create this on? I have other OpenAL/FFMPEG projects working fine but your example crashes on

pFrame = avcodec_alloc_frame(); (line 659)

stating
Windows has triggered a breakpoint in videoPlayer2.exe.
This may be due to a corruption of the heap, which indicates a bug in videoPlayer2.exe or any of the DLLs it has loaded.
GameDude
Posts: 498
Joined: Thu May 24, 2007 12:24 am

Post by GameDude »

Dang, that's a lot of code. Nice work. It would be nice to have this in the main build if Irrlicht. A video player would be great.
thespecial1
Posts: 135
Joined: Thu Oct 30, 2008 11:56 am
Location: UK
Contact:

Post by thespecial1 »

ellemayo wrote:what versions of the libraries did you create this on? I have other OpenAL/FFMPEG projects working fine but your example crashes on

pFrame = avcodec_alloc_frame(); (line 659)

stating
Windows has triggered a breakpoint in videoPlayer2.exe.
This may be due to a corruption of the heap, which indicates a bug in videoPlayer2.exe or any of the DLLs it has loaded.

I compiled the current version from SVN. I am curious what projects you are running with ffmpeg; sorry for the late reply, I have been messing with Ogre and only recently returned. I am writing a media player for my home box as I have a seriously large collection of movies and a very powerful machine, hence why I want a video player that sits on my large TV and is highly pretty. How about you?
Georgian
Posts: 60
Joined: Sat Mar 31, 2007 12:55 pm

work on windows.

Post by Georgian »

for those who want to use this player in windows.
what i did was:
download http://ffmpeg.arrozcru.org/builds/share ... 32.tar.bz2
from http://ffmpeg.arrozcru.org
add include files & link libraries from this
and also wrap ffmpeg includes with:
//include
extern "C"{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/avutil.h>
#include <inttypes.h>
}
//lib
#pragma comment (lib ,"avcodec-52.lib")
#pragma comment (lib ,"avdevice-52.lib")
#pragma comment (lib ,"avformat-52.lib")
#pragma comment (lib ,"avutil-49.lib")
#pragma comment (lib ,"swscale-0.lib")

and also you need inttypes.h which also is in that tar file.

as of OpenAl errors i cant help. i removed openal because i am using audiere in my project.

btw. thanks for this player.
i hope i helped someone by this post.
using Visual Studio 2003 , I use 3D Max 7, I love irrlicht its a really good and easy engine
thespecial1
Posts: 135
Joined: Thu Oct 30, 2008 11:56 am
Location: UK
Contact:

Re: work on windows.

Post by thespecial1 »

Georgian wrote: as of OpenAl errors i cant help. i removed openal because i am using audiere in my project.
glad it was of some use, could you post the class with the audio code for audiere added, btw the key problem I am working on with this player is how to get it to play abit smoother, is fine with cartoons/small movies but as soon as u stick a proper movie, especially HD into it then any large motion seems steppy, new version in the works ;0)
ellemayo
Posts: 15
Joined: Tue Oct 06, 2009 6:24 pm

Post by ellemayo »

For OpenAL in windows all you need to do is download the SDK and include the lib/inc folders

OpenAL 1.1 SDK\include
OpenAL 1.1 SDK\libs\Win32
OpenAL 1.1 SDK\ALUT\include
OpenAL 1.1 SDK\ALUT\lib

and add the following libraries to additional dependencies or as the other libraries were included above:

OpenAL32.lib
ALut.lib


I have still been having problems with this player, I am able to play one video but there are many others that will not play. Also the one that does play will crash at the end of it. Anyone else having these problems? Also I get the output incorrect frame size part way through the video...
Georgian
Posts: 60
Joined: Sat Mar 31, 2007 12:55 pm

Post by Georgian »

audierre integration in the way will finish it as soon as i get enough time, for now i have to get ready for RadioPhysics seminar.

so that no one says this post is offtopic:

actual frame is never initialized
when playing with this player I noticed that the actual frame counter was something like -8494257245...
using Visual Studio 2003 , I use 3D Max 7, I love irrlicht its a really good and easy engine
thespecial1
Posts: 135
Joined: Thu Oct 30, 2008 11:56 am
Location: UK
Contact:

Post by thespecial1 »

Georgian wrote:actual frime is never initialized
when playing with this player i noticed that actual frame was something like -8494257245...........
I can't see this one, is initialised in initAV, mayb I fixed that before
ellemayo wrote: I have still been having problems with this player, I am able to play one video but there are many others that will not play. Also the one that does play will crash at the end of it. Anyone else having these problems? Also I get the output incorrect frame size part way through the video...
the code you have been using is a work in progress, is currently the only ffmpeg version that plays the sound in synch but has plenty of raw edges
- video isn't playing as smooth as I would like, once I finish messing with boost this should be ok
- memory cleanup
- no seek
- various vid files dont play for some reason
- open a second file causes major crash in smgr->beginScene(...)
- and all the other missing features

however the version posted below does work alot better than the previous version posted, and doesnt crash at the end of the video playback

thou be warned I went throu a major rename of the funx/vars as was becoming messy (mainly because its two different programmers code merged)

murmuurVIDEO.h

Code: Select all

#ifndef __MURMUUR_VIDEO_H__ /////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Original code sources from juliusctw, Hansel, stepan1117
// Heavily Modified/Merged by theSpecial1
/////////////////////////////////////////////////////////////////////////////////////////////////////////////


// defines //////////////////////////////////////////////////////////////////////////////////////////////////
#define __MURMUUR_VIDEO_H__
#define NUM_BUFFERS 3
#define BUFFER_SIZE 19200
#define SOUND_OPENAL
/////////////////////////////////////////////////////////////////////////////////////////////////////////////


// includes /////////////////////////////////////////////////////////////////////////////////////////////////
#include <irrlicht.h>
#include <vector>
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
extern "C" {
	#include <string.h>
	#include <avcodec.h>
	#include <avformat.h>
	#include <swscale.h>
#ifdef SOUND_OPENAL
	#include <signal.h>
	#include <AL/al.h>
	#include <AL/alc.h>
	#include <AL/alut.h>
#endif
} ///////////////////////////////////////////////////////////////////////////////////////////////////////////


// namespaces ///////////////////////////////////////////////////////////////////////////////////////////////
using namespace irr;
using namespace core;
using namespace scene;
using namespace video;
using namespace io;
using namespace gui;
/////////////////////////////////////////////////////////////////////////////////////////////////////////////


// structures ///////////////////////////////////////////////////////////////////////////////////////////////
typedef struct MyFile *FilePtr;
typedef struct MyStream *StreamPtr;
// One demuxed elementary stream (audio or video) belonging to a MyFile.
struct MyStream {
	AVCodecContext *CodecCtx;	// decoder context for this stream
	int StreamIdx;				// index within the container (-1 = dummy stream)

	char *Data;					// raw (still-encoded) packet bytes
	size_t DataSize;			// bytes currently held in Data
	size_t DataSizeMax;			// allocated capacity of Data (excl. padding)
	char *DecodedData;			// decoded PCM not yet handed to the caller
	size_t DecodedDataSize;		// bytes currently held in DecodedData

	FilePtr parent;				// owning file, used to pull more packets
};

// An opened media file plus the set of streams created from it.
struct MyFile {
	AVFormatContext *FmtCtx;	// libavformat demuxer context
	StreamPtr *Streams;			// array of opened streams (realloc-grown)
	size_t StreamsSize;			// number of entries in Streams
}; //////////////////////////////////////////////////////////////////////////////////////////////////////////

// Playback lifecycle states exposed via murmuurVIDEO::psVideostate.
enum ePlaystate { Closed, Playing, Paused, Stopped };

// main class definition ////////////////////////////////////////////////////////////////////////////////////
// Plays a media file through FFmpeg, rendering video frames to an Irrlicht
// texture (2D overlay or a mesh material) and, when SOUND_OPENAL is defined,
// streaming audio through an OpenAL buffer queue kept in sync with playback.
class murmuurVIDEO {
private: ////////////////////////////////////////////////////////////////////////////////////////////////////    
	irr::ITimer *_itTimer;						// engine timer used to pace frame delivery
    irr::video::IVideoDriver *_vdVideoDriver;	// driver used for texture creation/drawing
    irr::video::IImage *_imCurrentImage;		// CPU-side image wrapping the RGB frame
    irr::video::ITexture *_txCurrentTexture;	// GPU texture the frame is copied into

	FilePtr _fpFile;							// opened media file
    StreamPtr _spStreamA, _spStreamV;			// audio / video streams
	bool _bHasAudio, _bHasVideo;				// which streams were found
	int _iDesiredH;								// output (scaled) height
	int _iDesiredW;								// output (scaled) width
	
    unsigned long _lLastTime;					// timestamp of the last displayed frame
	std::vector<AVFrame> _frFrame_Buffer;		// decoded video frames awaiting display
	bool _bFrameDisplayed;						// true once the current frame was applied
	AVFrame *_frFrame;							// frame currently being processed
    AVFrame *_frFrameRGB;						// frame converted to RGB32
    int _iNumBytes;								// size of _iBuffer in bytes
    uint8_t *_iBuffer;							// backing store for _frFrameRGB's planes
	s32* _p;									// locked texture pixels (scratch)
    s32* _pimage;								// locked image pixels (scratch)
	
#ifdef SOUND_OPENAL
    ALuint _aiBuffers[NUM_BUFFERS];				// OpenAL streaming buffer ring
    ALuint _aiSource;							// OpenAL playback source
    ALint _aiState; 							// last queried source state
    ALbyte *_abData;							// temp PCM chunk (BUFFER_SIZE bytes)
	ALenum _aeOldFormat;						// format of the previous file (gapless check)
	ALenum _aeFormat;							// AL format of the current audio stream
#endif
    int _iBuffCount; 							// bytes delivered by the last audio read
    int _iOld_rate;								// sample rate of the previous file
    int _iChannels;								// current channel count
    int _iBits;									// current bits per sample
    int _iRate;									// current sample rate
    int _iBasetime;								// queued-buffer count for time reporting
	
	bool _initAV(void);							// one-time AV/OpenAL setup
	FilePtr _openAVFile(const char *fname);
	void _closeAVFile(FilePtr file);	
	bool _DumpFrame(AVFrame *pFrame, int width, int height, bool needResize);
	StreamPtr _getAVAudioStream(FilePtr file, int streamnum);
	StreamPtr _getAVVideoStream(FilePtr file, int streamnum);
	int _getAVAudioInfo(StreamPtr stream, int *rate, int *channels, int *bits);
	bool _getNextPacket(FilePtr file, int streamidx);
	int _getAVAudioData(StreamPtr stream, void *data, int length);
	AVFrame *_getNextFrame(void);
public: /////////////////////////////////////////////////////////////////////////////////////////////////////
	double dSecondsPerFrame;	// frame interval derived from the stream's rate
	float fFramerate;			// frames per second
	float fDuration;			// total duration
	int iActualFrame;			// index of the frame currently shown
	int iNum_frames;			// total frame count (when known)
	ePlaystate psVideostate;	// current playback state
	IMeshSceneNode *mnOutputMesh;	// optional mesh to texture (NULL = 2D draw)
	bool bVideoLoaded;			// true once open() succeeded

	murmuurVIDEO(irr::video::IVideoDriver *irrVideoDriver, irr::ITimer *timer, int desiredW, int desiredH);
	murmuurVIDEO(irr::video::IVideoDriver *irrVideoDriver, irr::ITimer *timer, int desiredW, int desiredH, IMeshSceneNode *outputMesh);	
	bool open(core::stringc sFileName);
	bool refresh(void);
	void drawVideoTexture(void);
	void changeResolution(int w, int h);	
	void close(void);
    ~murmuurVIDEO();
}; //////////////////////////////////////////////////////////////////////////////////////////////////////////
#endif //////////////////////////////////////////////////////////////////////////////////////////////////////
murmuurVIDEO.cpp

Code: Select all

// header ///////////////////////////////////////////////////////////////////////////////////////////////////
#include <murmuurVIDEO.h>
/////////////////////////////////////////////////////////////////////////////////////////////////////////////


// audio hard quit //////////////////////////////////////////////////////////////////////////////////////////
// Global quit flag raised by Ctrl+C; polled by the playback loop.
static volatile int quitnow = 0;

// SIGINT handler: only sets the flag — everything else happens in the loop.
static void handle_sigint(int signum) {
    static_cast<void>(signum);  // unused; handler reacts the same to any delivery
    quitnow = 1;
} ///////////////////////////////////////////////////////////////////////////////////////////////////////////


// constructor default //////////////////////////////////////////////////////////////////////////////////////
// constructor default //////////////////////////////////////////////////////////////////////////////////////
// Overlay-mode constructor: frames are drawn as a 2D image (no target mesh).
murmuurVIDEO::murmuurVIDEO(irr::video::IVideoDriver *irrVideoDriver, irr::ITimer *timer,
						   int desiredW, int desiredH)
	: _vdVideoDriver(irrVideoDriver), _itTimer(timer),
	  _iDesiredH(desiredH), _iDesiredW(desiredW) {
	mnOutputMesh = NULL;       // no mesh target — drawVideoTexture() blits in 2D
	psVideostate = Closed;
	_initAV();
} ///////////////////////////////////////////////////////////////////////////////////////////////////////////


// constructor alternate mesh output ////////////////////////////////////////////////////////////////////////
// constructor alternate mesh output ////////////////////////////////////////////////////////////////////////
// Mesh-mode constructor: decoded frames are applied as the mesh's material
// texture instead of being drawn as a 2D overlay.
murmuurVIDEO::murmuurVIDEO(irr::video::IVideoDriver *irrVideoDriver, irr::ITimer *timer, int desiredW,
						   int desiredH, IMeshSceneNode *outputMesh)
	: _vdVideoDriver(irrVideoDriver), _itTimer(timer),
	  _iDesiredH(desiredH), _iDesiredW(desiredW), mnOutputMesh(outputMesh) {
	_initAV();
} ///////////////////////////////////////////////////////////////////////////////////////////////////////////


// initialise audio/video ///////////////////////////////////////////////////////////////////////////////////
// Reset playback state, configure Irrlicht texture creation, register FFmpeg
// codecs and (when SOUND_OPENAL is defined) bring up ALUT, the streaming
// buffers and the playback source. Returns false if any audio step fails;
// each failure path unwinds exactly what was set up before it.
bool murmuurVIDEO::_initAV(void) {
	// initial video flags
	bVideoLoaded = false;
	psVideostate = Closed;
	_vdVideoDriver->setTextureCreationFlag(ETCF_CREATE_MIP_MAPS, false);
    _vdVideoDriver->setTextureCreationFlag(ETCF_ALWAYS_32_BIT, true);
	_txCurrentTexture = NULL;
	_bFrameDisplayed = true;
	_iBasetime = 0;
	iActualFrame = 0;
    
	// Register all formats and codecs
    av_register_all();

#ifdef SOUND_OPENAL
	// signal handler — lets Ctrl+C raise the global 'quitnow' flag
	if (signal(SIGINT, handle_sigint) == SIG_ERR) {
        fprintf(stderr, "Unable to set handler for SIGINT!\n");
        return false;
    }

	// audio temp buffer — one streaming chunk's worth of PCM
    _abData = (ALbyte *)malloc(BUFFER_SIZE);
    if (!_abData) {
        fprintf(stderr, "Out of memory allocating temp buffer!\n");
        return false;
    }

    // Initialize ALUT with default settings 
    if (alutInit(NULL, NULL) == AL_FALSE) {
        free(_abData);
        fprintf(stderr, "Could not initialize ALUT (%s)!\n", alutGetErrorString(alutGetError()));
        return false;
    }

    // Generate the buffers and source 
    alGenBuffers(NUM_BUFFERS, _aiBuffers);
    if (alGetError() != AL_NO_ERROR) {
        alutExit();
        free(_abData);
        fprintf(stderr, "Could not create buffers...\n");
        return false;
    }
    alGenSources(1, &_aiSource);
    if (alGetError() != AL_NO_ERROR) {
        alDeleteBuffers(NUM_BUFFERS, _aiBuffers);
        alutExit();
        free(_abData);
        fprintf(stderr, "Could not create source...\n");
        return false;
    }

    // Set parameters so mono sources won't distance attenuate 
    alSourcei(_aiSource, AL_SOURCE_RELATIVE, AL_TRUE);
    alSourcei(_aiSource, AL_ROLLOFF_FACTOR, 0);
    if (alGetError() != AL_NO_ERROR) {
        alDeleteSources(1, &_aiSource);
        alDeleteBuffers(NUM_BUFFERS, _aiBuffers);
        alutExit();
        free(_abData);
        fprintf(stderr, "Could not set source parameters...\n");
        return false;
    }
#endif

	return true;
} ///////////////////////////////////////////////////////////////////////////////////////////////////////////


// get the next frame from the buffer ///////////////////////////////////////////////////////////////////////
// Pop the next decoded video frame off the FIFO, demuxing more packets until
// at least one frame is available. _getNextPacket() pushes decoded video
// frames into _frFrame_Buffer as a side effect while it hunts for packets of
// the (possibly dummy, StreamIdx == -1) audio stream.
// Returns a caller-owned AVFrame, or NULL at end of input.
AVFrame *murmuurVIDEO::_getNextFrame(void) {
	// get more frames if buffer empty
	while (_frFrame_Buffer.size() == 0) {
		if (!_getNextPacket(_spStreamA->parent, _spStreamA->StreamIdx)) {
			break;  // no more packets in the file
		}
	}
	
	// return a frame if we have one
	if (_frFrame_Buffer.size() > 0) { // we have frames
		AVFrame *t = avcodec_alloc_frame();
		// Take the OLDEST buffered frame (front) to keep display order.
		// The previous code copied back() while erasing begin(), which
		// returned the newest frame and silently dropped the oldest whenever
		// more than one frame was buffered — frames played out of order.
		*t = _frFrame_Buffer.front();
		_frFrame_Buffer.erase(_frFrame_Buffer.begin());
		return t;
	} else {
		return NULL;
	}	
} ///////////////////////////////////////////////////////////////////////////////////////////////////////////


// refresh audio/video //////////////////////////////////////////////////////////////////////////////////////
// Pump one iteration of playback: top up the OpenAL buffer queue (audio),
// then — when one frame-interval of wall-clock time has elapsed — decode,
// colour-convert and upload the next video frame. Call this once per render
// loop. Returns false when playback has finished or an error occurred.
// NOTE(review): img_convert_ctx/currentX/currentY are function-static, so
// they are shared by ALL murmuurVIDEO instances — confirm single-instance use.
bool murmuurVIDEO::refresh(void) {
	static struct SwsContext *img_convert_ctx;
	static int currentX = 0;
	static int currentY = 0;
	bool needResize = false;

#ifdef SOUND_OPENAL
	if (_bHasAudio) {
		// ensure we arnt out of sound
		if (_iBuffCount > 0 && !quitnow) {
			// Check if any buffers on the source are finished playing 
			ALint processed = 0;
			alGetSourcei(_aiSource, AL_BUFFERS_PROCESSED, &processed);
			if (processed == 0) {
				// All buffers are full. Check if the source is still playing.
				// If not, restart it, otherwise, print the time and rest 
				alGetSourcei(_aiSource, AL_SOURCE_STATE, &_aiState);
				if (alGetError() != AL_NO_ERROR) {
					fprintf(stderr, "Error checking source state...\n");
					return false;
				}
				if (_aiState != AL_PLAYING) {
					alSourcePlay(_aiSource);
					if (alGetError() != AL_NO_ERROR) {
						_closeAVFile(_fpFile);
						fprintf(stderr, "Error restarting playback...\n");
						return false;
					}
				} else {
					ALint offset;
					alGetSourcei(_aiSource, AL_SAMPLE_OFFSET, &offset);
					// Add the base time to the offset. Each count of basetime
					// represents one buffer, which is BUFFER_SIZE in bytes 
					offset += _iBasetime * (BUFFER_SIZE/_iChannels*8/_iBits);
					//fprintf(stderr, "\rTime: %d:%05.02f", offset/_iRate/60, (offset%(_iRate*60))/(float)_iRate);
					//alutSleep((ALfloat)0.01);
				}

				// all done for this iteration — the queue is already full, so
				// skip the refill below and go straight to the video side
				//return true;
				goto allaudiodone;
			}

			// Read the next chunk of data and refill the oldest buffer 
			_iBuffCount = _getAVAudioData(_spStreamA, _abData, BUFFER_SIZE);
			if (_iBuffCount > 0) {
				ALuint buf = 0;
				alSourceUnqueueBuffers(_aiSource, 1, &buf);
				if (buf != 0) {
					alBufferData(buf, _aeFormat, _abData, _iBuffCount, _iRate);
					alSourceQueueBuffers(_aiSource, 1, &buf);
					// For each successfully unqueued buffer, increment the
					// base time. The retrieved sample offset for timing is
					// relative to the start of the buffer queue, so for every
					// buffer that gets unqueued we need to increment the base
					// time to keep the reported time accurate and not fall backwards 
					_iBasetime++;
				}
				if (alGetError() != AL_NO_ERROR) {
					fprintf(stderr, "Error buffering data...\n");
					return false;
				}
			}
		} else { // out of audio
			return false;
		}	
	}
#endif

allaudiodone:	
	if (_bHasVideo) {
		// process the next video frame from the buffer		
		if (_itTimer->getRealTime() - _lLastTime > (dSecondsPerFrame*1000)) {
			_lLastTime = _itTimer->getRealTime();
			// NOTE(review): _getNextFrame() returns an avcodec_alloc_frame()'d
			// frame that is never av_free'd here — leaks one frame per tick.
			_frFrame = _getNextFrame();
			if (_frFrame != NULL) {
				if (img_convert_ctx == NULL) {
					// First frame: build the swscale context mapping the
					// stream's native size/format to desiredWxH RGB32
					currentX = _iDesiredW;
					currentY = _iDesiredH;

					int w = _spStreamV->CodecCtx->width;
					int h = _spStreamV->CodecCtx->height;

					img_convert_ctx = sws_getContext(w, h, _spStreamV->CodecCtx->pix_fmt, _iDesiredW, _iDesiredH, PIX_FMT_RGB32, 
						SWS_FAST_BILINEAR | SWS_CPU_CAPS_MMX2, NULL, NULL, NULL);
					if (img_convert_ctx == NULL) {
						fprintf(stderr, "Cannot initialize the conversion context!\n");
						return false;
					}
				} else if (currentX != _iDesiredW || currentY != _iDesiredH) {
					// Output size changed since the context was built: rebuild
					// it and flag _DumpFrame to re-create the texture
					needResize = true;
					currentX = _iDesiredW;
					currentY = _iDesiredH;

					int w = _spStreamV->CodecCtx->width;
					int h = _spStreamV->CodecCtx->height;

					sws_freeContext(img_convert_ctx);
					img_convert_ctx = NULL;

					img_convert_ctx = sws_getContext(w, h, _spStreamV->CodecCtx->pix_fmt, _iDesiredW, _iDesiredH, PIX_FMT_RGB32, 
						SWS_FAST_BILINEAR | SWS_CPU_CAPS_MMX2, NULL, NULL, NULL);
					if (img_convert_ctx == NULL) {
						fprintf(stderr, "Cannot re-initialize the conversion context!\n");
						return false;
					}
				}

				// Colour-convert/scale the decoded frame into _frFrameRGB
				sws_scale(img_convert_ctx, _frFrame->data, _frFrame->linesize, 0, _spStreamV->CodecCtx->height, _frFrameRGB->data, _frFrameRGB->linesize);

				//printf("Dumping Frame: %d  ::  FrameRate: %f\n", iActualFrame, fFramerate);

				// Dump the frame
				_DumpFrame(_frFrameRGB, _iDesiredW, _iDesiredH, needResize);

				// increase frame/time counts
				iActualFrame++;
				
				_bFrameDisplayed = false;
			} else {
				return false;
			}
		} 
	}

	// success, more audio/video to follow
	return true;
} ///////////////////////////////////////////////////////////////////////////////////////////////////////////


// draw the current texture onscreen ////////////////////////////////////////////////////////////////////////
// draw the current texture onscreen ////////////////////////////////////////////////////////////////////////
// Present the most recent frame: either apply it to the target mesh (once
// per new frame) or blit it as a 2D overlay at the top-left corner.
void murmuurVIDEO::drawVideoTexture(void) {
	// Nothing decoded yet — nothing to draw.
	if (_txCurrentTexture == NULL)
		return;

	if (mnOutputMesh == NULL) {
		// Overlay mode: draw the frame every call.
		_vdVideoDriver->draw2DImage(_txCurrentTexture, irr::core::position2d<irr::s32>(0,0),
			irr::core::rect<irr::s32>(0,0,_iDesiredW,_iDesiredH), 0, irr::video::SColor(255,255,255,255), false);
	} else if (!_bFrameDisplayed) {
		// Mesh mode: only re-apply the texture when a new frame has arrived.
		mnOutputMesh->setMaterialTexture(0, _txCurrentTexture);
		_bFrameDisplayed = true;
	}
} ///////////////////////////////////////////////////////////////////////////////////////////////////////////


// refresh audio/video //////////////////////////////////////////////////////////////////////////////////////
// Open a media file, locate its audio/video streams, and (for audio) pick an
// OpenAL format, prime the buffer queue and start playback. When the format
// and rate match the previously played file the initial load is skipped for
// gapless playback. Returns true when at least one stream is ready to play.
bool murmuurVIDEO::open(core::stringc sFileName) {
	// reset if already playing
	if (psVideostate != Closed) {
		close();
		_initAV();
	}

	// reset initial vars
	_aeFormat = 0;
	_iBasetime = 0;

    // check the file opens up
    _fpFile = _openAVFile(sFileName.c_str());
	if (!_fpFile) {
		fprintf(stderr, "Could not open %s\n", sFileName.c_str());
        return false;
	}	

	// create the stream objects
	_spStreamV = _getAVVideoStream(_fpFile, 0);
	_bHasVideo = !(!_spStreamV);
    /*if (!_spStreamV) {
        _closeAVFile(_fpFile);
        fprintf(stderr, "Could not open video in %s\n", sFileName.c_str());
        return false;
    }*/
    _spStreamA = _getAVAudioStream(_fpFile, 0);
	_bHasAudio = !(!_spStreamA);
    if (!_bHasAudio && _bHasVideo) {
        //_closeAVFile(_fpFile);
        //fprintf(stderr, "Could not open audio in %s\n", sFileName.c_str());
        //return false;
		// Video-only file: fabricate a dummy audio stream (StreamIdx == -1)
		// so the packet pump still has a stream record to work against.
		// NOTE(review): calloc/realloc results are not NULL-checked here.
		_spStreamA = (StreamPtr)calloc(1, sizeof(*_spStreamA));
		_spStreamA->parent = _fpFile;		
		_spStreamA->StreamIdx = -1;
		void *temp = realloc(_fpFile->Streams, (_fpFile->StreamsSize+1) * sizeof(*_fpFile->Streams));		
		_fpFile->Streams = (StreamPtr*)temp;
		_fpFile->Streams[_fpFile->StreamsSize++] = _spStreamA;
    }
    
	// audio specific open init
	if (_bHasAudio) {
		// Get the stream format, and figure out the OpenAL format. We use the
		// AL_EXT_MCFORMATS extension to provide output of 4 and 5.1 audio streams 
		if (_getAVAudioInfo(_spStreamA, &_iRate, &_iChannels, &_iBits) != 0) {
			_closeAVFile(_fpFile);
			fprintf(stderr, "Error getting audio info for %s\n", sFileName.c_str());
			return false;
		}

#ifdef SOUND_OPENAL
		// determine the sound formats
		if (_iBits == 8) {
			if (_iChannels == 1) _aeFormat = AL_FORMAT_MONO8;
			if (_iChannels == 2) _aeFormat = AL_FORMAT_STEREO8;
			if (alIsExtensionPresent("AL_EXT_MCFORMATS")) {
				if (_iChannels == 4) _aeFormat = alGetEnumValue("AL_FORMAT_QUAD8");
				if (_iChannels == 6) _aeFormat = alGetEnumValue("AL_FORMAT_51CHN8");
			}
		}
		if (_iBits == 16) {
			if (_iChannels == 1) _aeFormat = AL_FORMAT_MONO16;
			if (_iChannels == 2) _aeFormat = AL_FORMAT_STEREO16;
			if (alIsExtensionPresent("AL_EXT_MCFORMATS")) {
				if (_iChannels == 4) _aeFormat = alGetEnumValue("AL_FORMAT_QUAD16");
				if (_iChannels == 6) _aeFormat = alGetEnumValue("AL_FORMAT_51CHN16");
			}
		}
		if (_aeFormat == 0) {
			_closeAVFile(_fpFile);
			fprintf(stderr, "Unhandled format (%d channels, %d bits) for %s", _iChannels, _iBits, sFileName.c_str());
			return false;
		}

		// If the format of the last file matches the current one, we can skip
		// the initial load and let the processing loop take over (gap-less playback!) 
		_iBuffCount = 1;
		if (_aeFormat != _aeOldFormat || _iRate != _iOld_rate) {
			int j;

			_aeOldFormat = _aeFormat;
			_iOld_rate = _iRate;

			// Wait for the last song to finish playing 
			do {
				alutSleep((ALfloat)0.01);
				alGetSourcei(_aiSource, AL_SOURCE_STATE, &_aiState);
			} while(alGetError() == AL_NO_ERROR && _aiState == AL_PLAYING);

			// Rewind the source position and clear the buffer queue 
			alSourceRewind(_aiSource);
			alSourcei(_aiSource, AL_BUFFER, 0);

			// Fill and queue the buffers 
			for(j = 0;j < NUM_BUFFERS;j++) {
				// Make sure we get some data to give to the buffer 
				_iBuffCount = _getAVAudioData(_spStreamA, _abData, BUFFER_SIZE);
				if(_iBuffCount <= 0) return false;

				// Buffer the data with OpenAL and queue the buffer onto the source 
				alBufferData(_aiBuffers[j], _aeFormat, _abData, _iBuffCount, _iRate);
				alSourceQueueBuffers(_aiSource, 1, &_aiBuffers[j]);
			}
			if (alGetError() != AL_NO_ERROR) {
				_closeAVFile(_fpFile);
				fprintf(stderr, "Error buffering initial data...\n");
				return false;
			}

			// Now start playback! 
			alSourcePlay(_aiSource);
			if (alGetError() != AL_NO_ERROR) {
				_closeAVFile(_fpFile);
				fprintf(stderr, "Error starting playback...\n");
				return false;
			}
		} else {
			// When skipping the initial load of a file (because the previous
			// one is using the same exact format), set the base time to the
			// negative of the queued buffers. This is so the timing will be
			// from the beginning of this file, which won't start playing until
			// the next buffer to get queued does */
			_iBasetime = -NUM_BUFFERS;
		}
#endif
	}

	// video specific checks
	if (_bHasVideo) {
		if (mnOutputMesh != NULL) {
			mnOutputMesh->setVisible(true);
		}
	}

	// set state
	if (_bHasVideo || _bHasAudio) {
		psVideostate = Playing;
		bVideoLoaded = true;
		return true;
	} else {
		return false;
	}
} ///////////////////////////////////////////////////////////////////////////////////////////////////////////


// dump the frame to texture ////////////////////////////////////////////////////////////////////////////////
// Copy the decoded RGB32 frame into the Irrlicht "movie" texture, creating
// (or recreating, on resize) the backing image/texture pair as needed.
// Returns false if the texture/image memory could not be locked.
bool murmuurVIDEO::_DumpFrame(AVFrame *pFrame, int width, int height, bool needResize) {
    // One-shot creation flag. NOTE(review): 'static' shares this flag across
    // every murmuurVIDEO instance, so only the first player created builds its
    // own texture — confirm single-instance use before relying on this.
    static char first_time = 1;

    if (first_time) {
        // Wrap the decoded frame data in an image and register it as the
        // "movie" texture. 'true' = the image uses pFrame->data[0] directly.
		_imCurrentImage = _vdVideoDriver->createImageFromData(irr::video::ECF_A8R8G8B8,
                       irr::core::dimension2d<irr::u32>(width, height),
                       pFrame->data[0],
                       true);
        first_time = 0;
        _txCurrentTexture = _vdVideoDriver->addTexture("movie", _imCurrentImage);
    } else if (needResize) {
        // FIX: this was a separate 'if', so a first call with needResize==true
        // created the image/texture twice and leaked one texture.
        // The previous image is abandoned rather than dropped: its pixel
        // pointer was presumably already invalidated by changeResolution()
        // (which delete[]s _iBuffer), so dropping it could free memory the
        // class no longer owns. Known leak inherited from the original design.
       _vdVideoDriver->removeTexture(_txCurrentTexture);
       _imCurrentImage = _vdVideoDriver->createImageFromData(irr::video::ECF_A8R8G8B8,
                              irr::core::dimension2d<irr::u32>(width, height),
                              pFrame->data[0],
                              true);
        _txCurrentTexture = _vdVideoDriver->addTexture("movie", _imCurrentImage);
    }

    // Blit the image pixels into the texture. lock() can return NULL; bail
    // out instead of dereferencing a null pointer.
    _p = (s32*)_txCurrentTexture->lock ();
    if (_p == NULL) return false;
    _pimage = (s32*)_imCurrentImage->lock ();
    if (_pimage == NULL) {
        _txCurrentTexture->unlock();
        return false;
    }

    for (int i = 0; i < width*height; i++) _p[i] = _pimage[i];

    // unlock the texture and the image
    _txCurrentTexture->unlock();
    _imCurrentImage->unlock();

	return true;
} ///////////////////////////////////////////////////////////////////////////////////////////////////////////


// internal open file ///////////////////////////////////////////////////////////////////////////////////////
// Open a media container file and probe its stream info.
// Returns a zeroed FilePtr handle on success, NULL on any failure.
FilePtr murmuurVIDEO::_openAVFile(const char *fname) {
    // One-shot global ffmpeg initialisation; also silences the library's
    // non-error log output.
    static int initialised = 0;
    if (!initialised) {
        av_register_all();
        av_log_set_level(AV_LOG_ERROR);
        initialised = 1;
    }

    FilePtr handle = (FilePtr)calloc(1, sizeof(*handle));
    if (!handle) return NULL;

    if (av_open_input_file(&handle->FmtCtx, fname, NULL, 0, NULL) != 0) {
        free(handle);
        return NULL;
    }

    // Not every container carries stream info in its headers (e.g. system
    // MPEG streams), so probe for it explicitly after opening.
    if (av_find_stream_info(handle->FmtCtx) < 0) {
        av_close_input_file(handle->FmtCtx);
        free(handle);
        return NULL;
    }

    return handle;
} ///////////////////////////////////////////////////////////////////////////////////////////////////////////


// internal close file //////////////////////////////////////////////////////////////////////////////////////
// Close a container opened by _openAVFile: shut down every stream handle
// that was created on it, then close the format context and free the handle.
// Safe to call with NULL.
void murmuurVIDEO::_closeAVFile(FilePtr file) {
    if (!file) return;

    // Tear down each stream: close its codec and release its buffers.
    for (size_t idx = 0; idx < file->StreamsSize; idx++) {
        StreamPtr s = file->Streams[idx];
        avcodec_close(s->CodecCtx);
        free(s->Data);
        free(s->DecodedData);
        free(s);
    }
    free(file->Streams);

    av_close_input_file(file->FmtCtx);
    free(file);
} ///////////////////////////////////////////////////////////////////////////////////////////////////////////


// internal find the relevent streams ///////////////////////////////////////////////////////////////////////
// Return a handle for the streamnum'th AUDIO stream in the file, creating
// (decoder open + buffers) and caching it on first request.
// Returns NULL on bad args, missing stream, or any allocation/codec failure.
StreamPtr murmuurVIDEO::_getAVAudioStream(FilePtr file, int streamnum) {
    if (!file) return NULL;

    for (unsigned int idx = 0; idx < file->FmtCtx->nb_streams; idx++) {
        // Only audio streams count toward streamnum.
        if (file->FmtCtx->streams[idx]->codec->codec_type != CODEC_TYPE_AUDIO)
            continue;
        if (streamnum != 0) {
            streamnum--;
            continue;
        }

        // This is the requested stream. Return the cached handle if one
        // already exists for it.
        for (size_t k = 0; k < file->StreamsSize; k++) {
            if (file->Streams[k]->StreamIdx == (int)idx)
                return file->Streams[k];
        }

        // First request: build a new stream handle.
        StreamPtr handle = (StreamPtr)calloc(1, sizeof(*handle));
        if (!handle) return NULL;
        handle->parent = file;
        handle->CodecCtx = file->FmtCtx->streams[idx]->codec;
        handle->StreamIdx = idx;

        // Locate and open the matching decoder.
        AVCodec *decoder = avcodec_find_decoder(handle->CodecCtx->codec_id);
        if (!decoder || avcodec_open(handle->CodecCtx, decoder) < 0) {
            free(handle);
            return NULL;
        }

        // Scratch space for decoded samples before they reach the app.
        handle->DecodedData = (char *)malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
        if (!handle->DecodedData) {
            avcodec_close(handle->CodecCtx);
            free(handle);
            return NULL;
        }

        // Grow the stream list via a temporary pointer so the original list
        // (and the new handle) are not leaked if realloc fails.
        void *grown = realloc(file->Streams, (file->StreamsSize + 1) * sizeof(*file->Streams));
        if (!grown) {
            avcodec_close(handle->CodecCtx);
            free(handle->DecodedData);
            free(handle);
            return NULL;
        }
        file->Streams = (StreamPtr*)grown;
        file->Streams[file->StreamsSize++] = handle;
        return handle;
    }
    return NULL;
}
// Return a handle for the streamnum'th VIDEO stream in the file, creating and
// caching it on first request. Also captures framerate/duration metadata and
// allocates the decode/RGB scratch frames as a side effect.
// Returns NULL on bad args, missing stream, or codec/allocation failure.
StreamPtr murmuurVIDEO::_getAVVideoStream(FilePtr file, int streamnum) {
    unsigned int i;
    if (!file) return NULL;
    for(i = 0;i < file->FmtCtx->nb_streams;i++) {
        if (file->FmtCtx->streams[i]->codec->codec_type != CODEC_TYPE_VIDEO) continue;

        if (streamnum == 0) {
            StreamPtr stream;
            AVCodec *codec;
            void *temp;
            size_t j;

            // Found the requested stream. Check if a handle to this stream
            // already exists and return it if it does
            for(j = 0;j < file->StreamsSize;j++) {
                if (file->Streams[j]->StreamIdx == (int)i) return file->Streams[j];
            }

            // Doesn't yet exist. Now allocate a new stream object and fill in its info
            stream = (StreamPtr)calloc(1, sizeof(*stream));
            if (!stream) return NULL;
            stream->parent = file;
            stream->CodecCtx = file->FmtCtx->streams[i]->codec;
            stream->StreamIdx = i;

            // Try to find the codec for the given codec ID, and open it.
            // FIX: the NULL check must happen BEFORE touching
            // codec->capabilities; the original dereferenced 'codec' first
            // and crashed when no decoder existed for the codec ID.
            codec = avcodec_find_decoder(stream->CodecCtx->codec_id);
            if (!codec) {
                free(stream);
                return NULL;
            }
            // Allow feeding partial frames when supported; the flag must be
            // set before avcodec_open().
            if (codec->capabilities & CODEC_CAP_TRUNCATED) stream->CodecCtx->flags |= CODEC_FLAG_TRUNCATED;
            if (avcodec_open(stream->CodecCtx, codec) < 0) {
                free(stream);
                return NULL;
            }

			// capture the movie framerate, per-frame period, duration and frame count
			fFramerate = (float)file->FmtCtx->streams[i]->r_frame_rate.num;
			dSecondsPerFrame = (double)file->FmtCtx->streams[i]->r_frame_rate.den / 
				file->FmtCtx->streams[i]->r_frame_rate.num;
			fDuration = (float)file->FmtCtx->streams[i]->duration;
			iNum_frames = (int)file->FmtCtx->streams[i]->nb_frames;

			// set up the decode scratch frame, the RGB output frame and its buffer.
			// NOTE(review): 'stream' leaks on this early return — pre-existing behaviour.
			_frFrame = avcodec_alloc_frame();
			_frFrameRGB=avcodec_alloc_frame();
			if (_frFrameRGB == NULL) return NULL;
			_iNumBytes = avpicture_get_size(PIX_FMT_RGB32, _iDesiredW, _iDesiredH);
			_iBuffer = new uint8_t[_iNumBytes];
			avpicture_fill((AVPicture *)_frFrameRGB, _iBuffer, PIX_FMT_RGB32, _iDesiredW, _iDesiredH);

            // Append the new stream object to the stream list. The original
            // pointer will remain valid if realloc fails, so we need to use
            // another pointer to watch for errors and not leak memory
            temp = realloc(file->Streams, (file->StreamsSize+1) * sizeof(*file->Streams));
            if (!temp) {
                avcodec_close(stream->CodecCtx);
                free(stream->DecodedData);  // NULL for video streams; free(NULL) is a no-op
                free(stream);
                return NULL;
            }
            file->Streams = (StreamPtr*)temp;
            file->Streams[file->StreamsSize++] = stream;
            return stream;
        }
        streamnum--;
    }
    return NULL;
} ///////////////////////////////////////////////////////////////////////////////////////////////////////////


// internal grab audio stream bits etc //////////////////////////////////////////////////////////////////////
// Report the sample rate, channel count and sample width of an audio stream.
// Any of rate/channels/bits may be NULL if the caller doesn't need it.
// Returns 0 on success, 1 if the handle is missing or not an audio stream.
int murmuurVIDEO::_getAVAudioInfo(StreamPtr stream, int *rate, int *channels, int *bits) {
    if (stream == NULL) return 1;
    if (stream->CodecCtx->codec_type != CODEC_TYPE_AUDIO) return 1;

    if (rate != NULL) *rate = stream->CodecCtx->sample_rate;
    if (channels != NULL) *channels = stream->CodecCtx->channels;
    // 16-bit samples are assumed throughout the decode path (the decoder
    // output buffer is consumed as int16_t in _getAVAudioData).
    if (bits != NULL) *bits = 16;

    return 0;
} ///////////////////////////////////////////////////////////////////////////////////////////////////////////


// internal get next packet /////////////////////////////////////////////////////////////////////////////////
// Pull packets from the container until one belonging to 'streamidx' (the
// stream we need data for) has been appended to that stream's input buffer.
// Video packets encountered along the way are decoded immediately and the
// finished frames pushed onto _frFrame_Buffer. Returns false once the
// container is exhausted.
bool murmuurVIDEO::_getNextPacket(FilePtr file, int streamidx) {
    // NOTE(review): these statics persist across calls and make this method
    // non-reentrant. bytesRemaining is consumed back to 0 before the inner
    // loop exits, so the sharing looks harmless in single-threaded use —
    // confirm before calling from multiple threads/instances.
    static AVPacket packet;
    static int bytesRemaining=0;
    static uint8_t  *rawData;
    static int bytesDecoded;
    static int frameFinished;
	AVFrame *pFrame;
	pFrame = avcodec_alloc_frame();

	// read frames until we have an audio packet to return
    while(av_read_frame(file->FmtCtx, &packet) >= 0) {
        StreamPtr *iter = file->Streams;
        size_t i;

        // Check each stream the user has a handle for, looking for the one this packet belongs to 
        for(i = 0;i < file->StreamsSize;i++,iter++) {
            if ((*iter)->StreamIdx == packet.stream_index) {
				if (packet.stream_index == streamidx) {  // the requested (audio) stream
					size_t idx = (*iter)->DataSize;

					// Found the stream. Grow the input data buffer as needed to
					// hold the new packet's data. Additionally, some ffmpeg codecs
					// need some padding so they don't overread the allocated buffer
					if (idx+packet.size > (*iter)->DataSizeMax) {
						void *temp = realloc((*iter)->Data, idx+packet.size + FF_INPUT_BUFFER_PADDING_SIZE);
						if (!temp) break;
						(*iter)->Data = (char *)temp;
						(*iter)->DataSizeMax = idx+packet.size;
					}

					// Copy the packet's payload into the stream buffer and free it
					memcpy(&(*iter)->Data[idx], packet.data, packet.size);
					(*iter)->DataSize += packet.size;

					// Return if this stream is what we needed a packet for 
					if (streamidx == (*iter)->StreamIdx) {
						av_free_packet(&packet);
						return true;
					}					
					break;
				} else if (_bHasVideo) {  // continue decoding video frames to the buffer
					if (packet.stream_index == _spStreamV->StreamIdx) {
						bytesRemaining += packet.size;
						rawData = packet.data;
						
						// Work on the current packet until we have decoded all of it
						while (bytesRemaining > 0) {
							// Decode the next chunk of data
							bytesDecoded = avcodec_decode_video((*iter)->CodecCtx, pFrame, &frameFinished, rawData, bytesRemaining);

							// Decode errors are only logged. NOTE(review): with
							// bytesDecoded < 0, the subtraction below GROWS
							// bytesRemaining, so a persistent error could spin
							// this loop forever — consider breaking out instead.
							if (bytesDecoded < 0) {
								fprintf(stderr, "Error while decoding frame\n");
								//return false;
							}

							bytesRemaining -= bytesDecoded;
							rawData += bytesDecoded;

							// Did we finish the current frame? Then we can return
							if (frameFinished) { // add the current frame to the buffer
								// NOTE(review): this stores a SHALLOW copy of the
								// AVFrame; its data[] pointers reference buffers
								// owned by the codec and may be recycled by later
								// decode calls — confirm buffered frames are
								// consumed before the next decode overwrites them.
								_frFrame_Buffer.push_back(*pFrame);
								av_free(pFrame);
								frameFinished = false;
								if (!_bHasAudio) {
									// no audio stream to feed: one decoded frame is enough
									return true;
								}
								pFrame = avcodec_alloc_frame();								
							}
						}
					}
				}
            }
        }

        // Free the packet and look for another 
        av_free_packet(&packet);
		
    }

	// Container exhausted: release the in-flight frame and report end of data
	if (pFrame != NULL)
		av_free(pFrame);
	return false;
} ///////////////////////////////////////////////////////////////////////////////////////////////////////////


// internal get audio data //////////////////////////////////////////////////////////////////////////////////
// Decode up to 'length' bytes of PCM from the given audio stream into 'data'.
// Returns the number of bytes actually written: 0 on bad arguments, possibly
// short of 'length' at end of stream or on decode error. Surplus decoded
// samples are kept in stream->DecodedData for the next call.
int murmuurVIDEO::_getAVAudioData(StreamPtr stream, void *data, int length) {
    int dec = 0;

    if (!stream || stream->CodecCtx->codec_type != CODEC_TYPE_AUDIO) return 0;
    while(dec < length) {
        // If there's any pending decoded data, deal with it first 
        if (stream->DecodedDataSize > 0) {
            // Get the amount of bytes remaining to be written, 
			// and clamp to the amount of decoded data we have 
            size_t rem = length-dec;
            if (rem > stream->DecodedDataSize) rem = stream->DecodedDataSize;

            // Copy the data to the app's buffer and increment 
            memcpy(data, stream->DecodedData, rem);
            data = (char*)data + rem;
            dec += rem;

            // If there's any decoded data left, move it to the front of the
            // buffer for next time 
            if (rem < stream->DecodedDataSize)
                memmove(stream->DecodedData, &stream->DecodedData[rem], stream->DecodedDataSize - rem);
            stream->DecodedDataSize -= rem;
        }

        // Check if we need to get more decoded data 
        if (stream->DecodedDataSize == 0) {
            size_t insize;
            int size;
            int len;

            insize = stream->DataSize;
            if (insize == 0) {
                // Input buffer empty: pull another packet from the container
                _getNextPacket(stream->parent, stream->StreamIdx);
                
				// If there's no more input data, break and return what we have 
                if (insize == stream->DataSize) break;
                insize = stream->DataSize;
                // Zero the padding region so the decoder cannot overread
                memset(&stream->Data[insize], 0, FF_INPUT_BUFFER_PADDING_SIZE);
            }

            // Decode some data, and check for errors. avcodec_decode_audio2
            // may consume 0 bytes without producing output; keep feeding it
            // packets until it yields samples or input runs dry.
            size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
            while((len=avcodec_decode_audio2(stream->CodecCtx, (int16_t*)stream->DecodedData, 
					&size, (uint8_t*)stream->Data, insize)) == 0) {
                
				if (size > 0) break;
                _getNextPacket(stream->parent, stream->StreamIdx);
                if (insize == stream->DataSize) break;
                insize = stream->DataSize;
                memset(&stream->Data[insize], 0, FF_INPUT_BUFFER_PADDING_SIZE);
            }

            // Decode error: give up and return whatever was gathered so far
            if (len < 0) break;

            if (len > 0) {
                // If any input data is left, move it to the start of the
                // buffer, and decrease the buffer size 
                size_t rem = insize-len;
                if (rem) memmove(stream->Data, &stream->Data[len], rem);
                stream->DataSize = rem;
            }
            // Set the output buffer size 
            stream->DecodedDataSize = size;
        }
    }

    // Return the number of bytes we were able to get 
    return dec;
} ///////////////////////////////////////////////////////////////////////////////////////////////////////////


// switch res ///////////////////////////////////////////////////////////////////////////////////////////////
// Switch the desired output resolution, re-allocating the RGB conversion
// buffer and re-pointing the RGB frame's planes at it. No-op when the
// requested size matches the current one.
void murmuurVIDEO::changeResolution(int w, int h) {
	if (w == _iDesiredW && h == _iDesiredH) return;

	std::cout << "Changing resolution from ["<< _iDesiredW << "x" << _iDesiredH << "] to [" << w << "x" << h << "]" << std::endl;

	_iDesiredW = w;
	_iDesiredH = h;

	// Drop the old conversion buffer and size a fresh one for the new dimensions
	delete [] _iBuffer;
	//av_free((AVPicture *)_frFrameRGB);
	_iNumBytes = avpicture_get_size(PIX_FMT_RGB32, _iDesiredW, _iDesiredH);
	_iBuffer = new uint8_t[_iNumBytes];

	// Assign appropriate parts of the buffer to the image planes in pFrameRGB
	avpicture_fill((AVPicture *)_frFrameRGB, _iBuffer, PIX_FMT_RGB32, _iDesiredW, _iDesiredH);
} ///////////////////////////////////////////////////////////////////////////////////////////////////////////


// close the current active file ////////////////////////////////////////////////////////////////////////////
// Release all playback resources: RGB/YUV frames, buffered frames, the
// output texture, the OpenAL source/buffers, and the container file.
// FIX: freed pointers are now nulled so a subsequent changeResolution(),
// re-open or second close() cannot double-free them, and removeTexture is
// guarded against a never-created texture.
void murmuurVIDEO::close(void) {
	// Free the RGB conversion buffer
	if (_iBuffer != NULL) {
		delete [] _iBuffer;
		_iBuffer = NULL;
	}

	// Free the RGB frame wrapper
	if (_frFrameRGB != NULL) {
		av_free(_frFrameRGB);
		_frFrameRGB = NULL;
	}

	// Free the YUV frame
	if (_frFrame != NULL) {
		av_free(_frFrame);
		_frFrame = NULL;
	}

	// clear any buffered (not yet displayed) frames and drop the texture
	_frFrame_Buffer.clear();
	if (_txCurrentTexture != NULL) {
		_vdVideoDriver->removeTexture(_txCurrentTexture);
		_txCurrentTexture = NULL;
	}

#ifdef SOUND_OPENAL
	// All data has been streamed in. Wait until the source stops playing it 
    do {
        alutSleep((ALfloat)0.01);
        alGetSourcei(_aiSource, AL_SOURCE_STATE, &_aiState);
    } while(alGetError() == AL_NO_ERROR && _aiState == AL_PLAYING);

    // All files done. Delete the source and buffers, and close OpenAL 
    alDeleteSources(1, &_aiSource);
    alDeleteBuffers(NUM_BUFFERS, _aiBuffers);
    alutExit();
    free(_abData);
    _abData = NULL;
#endif

	// close the container file
	_closeAVFile(_fpFile);
	_fpFile = NULL;
    fprintf(stderr, "\nDone.\n");

	bVideoLoaded = false;
	psVideostate = Closed;
} ///////////////////////////////////////////////////////////////////////////////////////////////////////////


// destruct /////////////////////////////////////////////////////////////////////////////////////////////////
// Destructor: release AV/texture/OpenAL resources unless close() already ran.
murmuurVIDEO::~murmuurVIDEO() {
	if (psVideostate == Closed) return;
	close();
} ///////////////////////////////////////////////////////////////////////////////////////////////////////////
example usage::create the player/output mesh

Code: Select all

base->videoOutput = base->smgr->addCubeSceneNode(1,0,-1,vector3df(0,0,0),vector3df(0,0,0),vector3df(774,578,1));
								base->videoOutput->setMaterialFlag(EMF_LIGHTING, false);
						base->videoOutput->setMaterialTexture( 0, base->driver->getTexture("theeye.jpg") );
videoPlayer = new murmuurVIDEO(
									base->driver,
									base->device->getTimer(),
									1024,
									768, 
									base->videoOutput);

example usage::open a video, destroying the player if it has video

Code: Select all

if (videoPlayer->bVideoLoaded) {
									videoPlayer->close();
									delete videoPlayer;
									videoPlayer = new murmuurVIDEO(
										base->driver,
										base->device->getTimer(),
										1024,
										768, 
										base->videoOutput);
									videoPlayer->mnOutputMesh = base->videoOutput;
								}
								videoPlayer->open(base->filename.c_str());
example usage::the refresh loop

Code: Select all

// play onwards if active
								if (videoPlayer->psVideostate == Playing) {  // play active
									if (!videoPlayer->refresh()) {  // no more AV
										videoPlayer->psVideostate = Stopped;
									} else {  // still playing so draw the next frame
										videoPlayer->drawVideoTexture();
									}								
								}
example usage::pause playback

Code: Select all

videoPlayer->psVideostate = Paused;
example usage::resume playback

Code: Select all

videoPlayer->psVideostate = Playing;
example usage::stop playback

Code: Select all

if (videoPlayer->bVideoLoaded) {
									videoPlayer->close();
									delete videoPlayer;
									videoPlayer = new murmuurVIDEO(
										base->driver,
										base->device->getTimer(),
										base->uResWidth_Avail.at(base->uResIndex),
										base->uResHeight_Avail.at(base->uResIndex), 
										base->videoOutput);
									videoPlayer->mnOutputMesh = base->videoOutput;
								}
As previously mentioned, this code has lots of rough edges; if you find any that aren't mentioned here, please leave a post.
ellemayo
Posts: 15
Joined: Tue Oct 06, 2009 6:24 pm

Post by ellemayo »

have you found that any specific audio/video encoding works best with the player?
thespecial1
Posts: 135
Joined: Thu Oct 30, 2008 11:56 am
Location: UK
Contact:

Post by thespecial1 »

ellemayo wrote:have you found that any specific audio/video encoding works best with the player?
MPEG-4 video in an AVI container, coupled with MP3 (MPEG layer-3) audio, seems to work flawlessly; see FFmpeg's main page for the full list of supported formats.
Post Reply