It has taken some time, but I definitely know a lot more about video/audio playback now that I am at the top of the learning curve. Here is an unfinished version of the class; it is still missing features like pause/stop/rewind and probably needs some memory clean-up work.
The OpenAL code can be disabled via a #define in the header. If someone adapts this for one of the other sound libraries, please pass the code back to me so that I can merge it in.
videoplayer.h
Code: Select all
#ifndef __VIDEO_PLAYER_H__ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Original code sources from juliusctw, Hansel, stepan1117
// Heavily Modified/Merged by theSpecial1
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// defines ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#define __VIDEO_PLAYER_H__
#define NUM_BUFFERS 3
#define BUFFER_SIZE 19200
#define SOUND_OPENAL
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// includes ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include <irrlicht.h>
#include <vector>
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
extern "C" {
#include <string.h>
#include <avcodec.h>
#include <avformat.h>
#include <swscale.h>
#ifdef SOUND_OPENAL
#include <signal.h>
#include <AL/al.h>
#include <AL/alc.h>
#include <AL/alut.h>
#endif
} /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// namespaces /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
using namespace irr;
using namespace core;
using namespace scene;
using namespace video;
using namespace io;
using namespace gui;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// structures /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Handle types: a FilePtr owns the demuxer context plus the list of opened
// streams; a StreamPtr wraps one opened stream inside that file.
typedef struct MyFile *FilePtr;
typedef struct MyStream *StreamPtr;
struct MyStream {
AVCodecContext *CodecCtx; // decoder context (borrowed from FmtCtx->streams[i]->codec; closed with avcodec_close)
int StreamIdx; // index of this stream inside the container
char *Data; // queued, undecoded packet bytes for this stream (realloc'd, with FFmpeg input padding)
size_t DataSize; // bytes currently valid in Data
size_t DataSizeMax; // allocated capacity of Data (excluding the padding region)
char *DecodedData; // decoded PCM output buffer (audio streams only; NULL for video)
size_t DecodedDataSize; // bytes currently valid in DecodedData
FilePtr parent; // back-pointer to the owning file
};
struct MyFile {
AVFormatContext *FmtCtx; // demuxer/container context
StreamPtr *Streams; // realloc-grown array of opened stream handles
size_t StreamsSize; // number of entries in Streams
}; ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// main class definition //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Plays a video file through Irrlicht (as a 2D blit or a mesh texture) with
// optional OpenAL audio. Usage: construct, open(), then call refresh() +
// drawVideoTexture() every frame.
class cVideoPlayer {
private: //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// NOTE(review): Timer is declared before IrrVideoDriver but the constructor
// init lists name IrrVideoDriver first; harmless for plain pointers, but
// -Wreorder will warn. Confirm before reordering members.
irr::ITimer *Timer; // frame-pacing clock (not owned)
irr::video::IVideoDriver *IrrVideoDriver; // rendering driver (not owned)
irr::video::IImage *CurrentImage; // CPU-side image wrapping the RGB frame data
irr::video::ITexture *CurrentTexture; // texture the current frame is copied into
FilePtr file; // opened media file (see openAVFile)
StreamPtr streamA, streamV; // audio / video stream handles
unsigned long lastTime; // timestamp (ms) when the last frame was taken
double SecondsPerFrame; // presentation interval of one video frame
float framerate; // video frames per second (r_frame_rate.num)
int actualFrame; // running count of displayed frames
int desiredH, desiredW; // output resolution
IMeshSceneNode *outputMesh; // optional mesh to texture instead of the 2D blit
std::vector<AVFrame> Frame_Buffer; // decoded video frames awaiting display
bool bFrameDisplayed; // true once the current frame has been put on the mesh
AVFrame *Frame; // native-format decode target
AVFrame *FrameRGB; // RGB32-converted frame (planes point into Buffer)
int NumBytes; // size of Buffer in bytes
uint8_t *Buffer; // pixel storage for FrameRGB (new[]'d)
s32* p; // scratch: locked texture pixels
s32* pimage; // scratch: locked image pixels
#ifdef SOUND_OPENAL
ALuint buffers[NUM_BUFFERS]; // streaming buffer ring
ALuint source; // the single playback source
ALint state; // last queried source state
ALbyte *data; // temp PCM chunk, BUFFER_SIZE bytes (malloc'd in initAV)
ALenum old_format; // format of the previous file (gapless-playback check)
ALenum format; // AL format of the current audio stream
#endif
int count; // bytes returned by the last audio fetch (0 => audio exhausted)
int i; // NOTE(review): appears unused — candidate for removal
int old_rate; // sample rate of the previous file (gapless-playback check)
int channels; // audio channel count
int bits; // audio bit depth (always reported as 16)
int rate; // audio sample rate
int basetime; // buffers dequeued so far; used as the timing base
bool initAV(void); // one-time video/AL setup (called from the constructors)
FilePtr openAVFile(const char *fname); // open + probe a media file
void closeAVFile(FilePtr file); // close streams, buffers and container
bool DumpFrame(AVFrame *pFrame, int width, int height, bool needResize); // copy RGB frame into the texture
StreamPtr getAVAudioStream(FilePtr file, int streamnum); // open the nth audio stream
StreamPtr getAVVideoStream(FilePtr file, int streamnum); // open the nth video stream
int getAVAudioInfo(StreamPtr stream, int *rate, int *channels, int *bits); // query audio parameters
bool getNextPacket(FilePtr file, int streamidx); // demux one packet (decodes video as a side effect)
int getAVAudioData(StreamPtr stream, void *data, int length); // decode up to length bytes of PCM
AVFrame *getNextFrame(void); // pop the next buffered video frame
public: ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
cVideoPlayer(irr::video::IVideoDriver *irrVideoDriver, irr::ITimer *timer, int desiredW, int desiredH);
cVideoPlayer(irr::video::IVideoDriver *irrVideoDriver, irr::ITimer *timer, int desiredW, int desiredH, IMeshSceneNode *outputMesh);
bool open(core::stringc sFileName); // open file + set up audio; false on failure
bool refresh(void); // advance audio/video one tick; false when done
void drawVideoTexture(void); // present the current frame
void changeResolution(int w, int h); // change output size on the fly
irr::video::ITexture* getVideoTexture(void); // texture of the latest frame (may be NULL)
float getFrameRate(void); // video FPS
~cVideoPlayer();
}; ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#endif ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
videoplayer.cpp
Code: Select all
// header /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include "videoPlayer.h"
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// audio hard quit ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Raised by the SIGINT handler; polled by the playback loop to abort audio streaming.
static volatile int quitnow = 0;

// SIGINT handler: only raises the quit flag (the sole async-signal-safe action here).
static void handle_sigint(int signum)
{
	(void)signum; // unused — registered for SIGINT only
	quitnow = 1;
}
// constructor default ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Default constructor: frames are presented via draw2DImage (no mesh target).
// @param irrVideoDriver rendering driver (not owned)
// @param timer          clock used for frame pacing (not owned)
// @param desiredW/H     output resolution of the converted frames
cVideoPlayer::cVideoPlayer(irr::video::IVideoDriver *irrVideoDriver, irr::ITimer *timer, int desiredW, int desiredH) : IrrVideoDriver(irrVideoDriver), Timer(timer) {
	// set private vars
	this->desiredH = desiredH;
	this->desiredW = desiredW;
	this->outputMesh = NULL;
	// Fix: zero every pointer/counter that refresh() and the destructor touch.
	// Previously these were left uninitialized, so destroying a player whose
	// open() never ran dereferenced garbage.
	file = NULL;
	streamA = streamV = NULL;
	Frame = FrameRGB = NULL;
	Buffer = NULL;
	CurrentImage = NULL;
	p = pimage = NULL;
	lastTime = 0;
	actualFrame = 0;
	framerate = 0.0f;
	SecondsPerFrame = 0.0;
	NumBytes = 0;
	count = 0;
	i = 0;
	rate = channels = bits = 0;
	old_rate = 0;
	basetime = 0;
#ifdef SOUND_OPENAL
	data = NULL;
	source = 0;
	state = 0;
	format = old_format = 0;
#endif
	// initial sound/audio config
	initAV();
}
// constructor alternate mesh output //////////////////////////////////////////////////////////////////////////////////////////////////////
// Alternate constructor: frames are applied as a material texture on outputMesh
// instead of being blitted to the screen.
// @param outputMesh mesh scene node to receive the video texture (not owned)
cVideoPlayer::cVideoPlayer(irr::video::IVideoDriver *irrVideoDriver, irr::ITimer *timer, int desiredW, int desiredH, IMeshSceneNode *outputMesh) : IrrVideoDriver(irrVideoDriver), Timer(timer) {
	// set private vars
	this->desiredH = desiredH;
	this->desiredW = desiredW;
	this->outputMesh = outputMesh;
	// Fix: zero every pointer/counter that refresh() and the destructor touch.
	// Previously these were left uninitialized, so destroying a player whose
	// open() never ran dereferenced garbage.
	file = NULL;
	streamA = streamV = NULL;
	Frame = FrameRGB = NULL;
	Buffer = NULL;
	CurrentImage = NULL;
	p = pimage = NULL;
	lastTime = 0;
	actualFrame = 0;
	framerate = 0.0f;
	SecondsPerFrame = 0.0;
	NumBytes = 0;
	count = 0;
	i = 0;
	rate = channels = bits = 0;
	old_rate = 0;
	basetime = 0;
#ifdef SOUND_OPENAL
	data = NULL;
	source = 0;
	state = 0;
	format = old_format = 0;
#endif
	// initial sound/audio config
	initAV();
}
// initialise audio/video /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// One-time audio/video setup: texture creation flags, ffmpeg registration and
// (when SOUND_OPENAL is defined) the ALUT context, buffer ring and source.
// Returns false if any audio-side step fails; each failure path unwinds the
// steps completed so far. Video-side setup cannot fail.
bool cVideoPlayer::initAV(void) {
// initial video flags
IrrVideoDriver->setTextureCreationFlag(ETCF_CREATE_MIP_MAPS, false);
IrrVideoDriver->setTextureCreationFlag(ETCF_ALWAYS_32_BIT, true);
CurrentTexture = NULL;
bFrameDisplayed = true;
// Register all formats and codecs
av_register_all();
#ifdef SOUND_OPENAL
// Install the Ctrl-C handler so the audio loop can bail out via `quitnow`
if (signal(SIGINT, handle_sigint) == SIG_ERR) {
fprintf(stderr, "Unable to set handler for SIGINT!\n");
return false;
}
// temp buffer one decoded PCM chunk is staged in before being queued on AL
data = (ALbyte *)malloc(BUFFER_SIZE);
if (!data) {
fprintf(stderr, "Out of memory allocating temp buffer!\n");
return false;
}
// Initialize ALUT with default settings (opens device + context)
if (alutInit(NULL, NULL) == AL_FALSE) {
free(data);
fprintf(stderr, "Could not initialize ALUT (%s)!\n", alutGetErrorString(alutGetError()));
return false;
}
// Generate the streaming buffer ring and the single playback source
alGenBuffers(NUM_BUFFERS, buffers);
if (alGetError() != AL_NO_ERROR) {
alutExit();
free(data);
fprintf(stderr, "Could not create buffers...\n");
return false;
}
alGenSources(1, &source);
if (alGetError() != AL_NO_ERROR) {
alDeleteBuffers(NUM_BUFFERS, buffers);
alutExit();
free(data);
fprintf(stderr, "Could not create source...\n");
return false;
}
// Set parameters so mono sources won't distance attenuate
alSourcei(source, AL_SOURCE_RELATIVE, AL_TRUE);
alSourcei(source, AL_ROLLOFF_FACTOR, 0);
if (alGetError() != AL_NO_ERROR) {
alDeleteSources(1, &source);
alDeleteBuffers(NUM_BUFFERS, buffers);
alutExit();
free(data);
fprintf(stderr, "Could not set source parameters...\n");
return false;
}
#endif
return true;
} /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// get the next frame from the buffer /////////////////////////////////////////////////////////////////////////////////////////////////////
// Pop the oldest decoded video frame from the buffer, demuxing more packets
// if the buffer is empty (video frames are decoded as a side effect of
// fetching audio packets in getNextPacket).
// @return a shallow copy the caller must av_free, or NULL at end of stream.
AVFrame *cVideoPlayer::getNextFrame(void) {
	// pump the demuxer until a frame arrives or the file runs dry
	while (Frame_Buffer.empty()) {
		if (!getNextPacket(streamA->parent, streamA->StreamIdx))
			break;
	}
	if (Frame_Buffer.empty())
		return NULL; // end of stream
	// Fix: return the FRONT of the queue. The old code returned back() while
	// erasing begin() — i.e. it handed out the newest frame and silently
	// discarded a different (oldest) one, so frames played out of order.
	AVFrame *t = avcodec_alloc_frame();
	*t = Frame_Buffer.front();
	Frame_Buffer.erase(Frame_Buffer.begin());
	return t;
}
// refresh audio/video ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Advance playback one tick: keep the OpenAL buffer queue fed, then — once the
// next frame's presentation time (SecondsPerFrame) has elapsed — decode,
// colour-convert and upload the next video frame. Returns false when either
// stream is exhausted or an unrecoverable AL/swscale error occurs.
bool cVideoPlayer::refresh(void) {
// NOTE(review): function-local statics are shared by ALL cVideoPlayer
// instances — fine for a single player, wrong if two play concurrently.
static struct SwsContext *img_convert_ctx;
static int currentX = 0;
static int currentY = 0;
bool needResize = false;
#ifdef SOUND_OPENAL
// ensure we aren't out of sound (count==0 means the last fetch returned nothing)
if (count > 0 && !quitnow) {
// Check if any buffers on the source are finished playing
ALint processed = 0;
alGetSourcei(source, AL_BUFFERS_PROCESSED, &processed);
if (processed == 0) {
// All buffers are full. Check if the source is still playing.
// If not, restart it, otherwise, print the time and rest
alGetSourcei(source, AL_SOURCE_STATE, &state);
if (alGetError() != AL_NO_ERROR) {
fprintf(stderr, "Error checking source state...\n");
return false;
}
if (state != AL_PLAYING) {
alSourcePlay(source);
if (alGetError() != AL_NO_ERROR) {
closeAVFile(file);
fprintf(stderr, "Error restarting playback...\n");
return false;
}
} else {
ALint offset;
alGetSourcei(source, AL_SAMPLE_OFFSET, &offset);
// Add the base time to the offset. Each count of basetime
// represents one buffer, which is BUFFER_SIZE in bytes
offset += basetime * (BUFFER_SIZE/channels*8/bits);
//fprintf(stderr, "\rTime: %d:%05.02f", offset/rate/60, (offset%(rate*60))/(float)rate);
//alutSleep((ALfloat)0.01);
}
// all done for this iteration — skip straight to the video half.
// NOTE(review): the label sits outside this #ifdef, so builds without
// SOUND_OPENAL only get an unused-label warning, not an error.
//return true;
goto allaudiodone;
}
// Read the next chunk of data and refill the oldest buffer
count = getAVAudioData(streamA, data, BUFFER_SIZE);
if (count > 0) {
ALuint buf = 0;
alSourceUnqueueBuffers(source, 1, &buf);
if (buf != 0) {
alBufferData(buf, format, data, count, rate);
alSourceQueueBuffers(source, 1, &buf);
// For each successfully unqueued buffer, increment the
// base time. The retrieved sample offset for timing is
// relative to the start of the buffer queue, so for every
// buffer that gets unqueued we need to increment the base
// time to keep the reported time accurate and not fall backwards
basetime++;
}
if (alGetError() != AL_NO_ERROR) {
fprintf(stderr, "Error buffering data...\n");
return false;
}
}
} else { // out of audio
return false;
}
#endif
allaudiodone:
// process the next video frame from the buffer once its display time arrives
if (Timer->getRealTime() - lastTime > (SecondsPerFrame*1000)) {
lastTime = Timer->getRealTime();
// NOTE(review): the AVFrame returned by getNextFrame() is heap-allocated
// and never av_free'd here — leaks one frame struct per displayed frame.
Frame = getNextFrame();
if (Frame != NULL) {
// (re)build the swscale colour-conversion context on first use or
// whenever changeResolution() altered the output size
if (img_convert_ctx == NULL) {
currentX = desiredW;
currentY = desiredH;
int w = streamV->CodecCtx->width;
int h = streamV->CodecCtx->height;
img_convert_ctx = sws_getContext(w, h, streamV->CodecCtx->pix_fmt, desiredW, desiredH, PIX_FMT_RGB32,
SWS_FAST_BILINEAR | SWS_CPU_CAPS_MMX2, NULL, NULL, NULL);
if (img_convert_ctx == NULL) {
fprintf(stderr, "Cannot initialize the conversion context!\n");
return false;
}
} else if (currentX != desiredW || currentY != desiredH) {
needResize = true;
currentX = desiredW;
currentY = desiredH;
int w = streamV->CodecCtx->width;
int h = streamV->CodecCtx->height;
sws_freeContext(img_convert_ctx);
img_convert_ctx = NULL;
img_convert_ctx = sws_getContext(w, h, streamV->CodecCtx->pix_fmt, desiredW, desiredH, PIX_FMT_RGB32,
SWS_FAST_BILINEAR | SWS_CPU_CAPS_MMX2, NULL, NULL, NULL);
if (img_convert_ctx == NULL) {
fprintf(stderr, "Cannot re-initialize the conversion context!\n");
return false;
}
}
// convert native pix_fmt -> RGB32 into FrameRGB/Buffer
sws_scale(img_convert_ctx, Frame->data, Frame->linesize, 0, streamV->CodecCtx->height, FrameRGB->data, FrameRGB->linesize);
//printf("Dumping Frame: %d :: FrameRate: %f\n", actualFrame, framerate);
// Dump the frame
DumpFrame(FrameRGB, desiredW, desiredH, needResize);
// increase frame/time counts
actualFrame++;
bFrameDisplayed = false;
} else {
return false;
}
}
// success, more audio/video to follow
return true;
} /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// draw the current texture onscreen //////////////////////////////////////////////////////////////////////////////////////////////////////
// Present the most recently dumped frame: either bind it onto the target mesh
// (once per decoded frame) or blit it to the screen as a 2D image.
void cVideoPlayer::drawVideoTexture(void) {
	if (CurrentTexture == NULL)
		return; // nothing has been decoded yet
	if (outputMesh == NULL) {
		// no mesh target -> draw straight to the framebuffer every call
		IrrVideoDriver->draw2DImage(CurrentTexture, irr::core::position2d<irr::s32>(0,0),
			irr::core::rect<irr::s32>(0,0,desiredW,desiredH), 0, irr::video::SColor(255,255,255,255), false);
	} else if (!bFrameDisplayed) {
		// retexture the mesh only when a fresh frame is pending
		outputMesh->setMaterialTexture(0, CurrentTexture);
		bFrameDisplayed = true;
	}
}
// refresh audio/video ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Open a media file for playback: demux it, open the first video and audio
// streams, work out the OpenAL buffer format, prime the buffer queue and start
// the source playing. Returns false (after closing the file) on any failure.
bool cVideoPlayer::open(core::stringc sFileName) {
// reset initial vars
format = 0;
basetime = 0;
// check the file opens up
file = openAVFile(sFileName.c_str());
if (!file) {
fprintf(stderr, "Could not open %s\n", sFileName.c_str());
return false;
}
// create the stream objects (first video stream, first audio stream)
streamV = getAVVideoStream(file, 0);
if (!streamV) {
closeAVFile(file);
fprintf(stderr, "Could not open video in %s\n", sFileName.c_str());
return false;
}
streamA = getAVAudioStream(file, 0);
if (!streamA) {
closeAVFile(file);
fprintf(stderr, "Could not open audio in %s\n", sFileName.c_str());
return false;
}
// Get the stream format, and figure out the OpenAL format. We use the
// AL_EXT_MCFORMATS extension to provide output of 4 and 5.1 audio streams
if (getAVAudioInfo(streamA, &rate, &channels, &bits) != 0) {
closeAVFile(file);
fprintf(stderr, "Error getting audio info for %s\n", sFileName.c_str());
return false;
}
#ifdef SOUND_OPENAL
// map (bits, channels) onto an OpenAL buffer format enum
if (bits == 8) {
if (channels == 1) format = AL_FORMAT_MONO8;
if (channels == 2) format = AL_FORMAT_STEREO8;
if (alIsExtensionPresent("AL_EXT_MCFORMATS")) {
if (channels == 4) format = alGetEnumValue("AL_FORMAT_QUAD8");
if (channels == 6) format = alGetEnumValue("AL_FORMAT_51CHN8");
}
}
if (bits == 16) {
if (channels == 1) format = AL_FORMAT_MONO16;
if (channels == 2) format = AL_FORMAT_STEREO16;
if (alIsExtensionPresent("AL_EXT_MCFORMATS")) {
if (channels == 4) format = alGetEnumValue("AL_FORMAT_QUAD16");
if (channels == 6) format = alGetEnumValue("AL_FORMAT_51CHN16");
}
}
if (format == 0) {
closeAVFile(file);
fprintf(stderr, "Unhandled format (%d channels, %d bits) for %s", channels, bits, sFileName.c_str());
return false;
}
// If the format of the last file matches the current one, we can skip
// the initial load and let the processing loop take over (gap-less playback!)
count = 1;
if (format != old_format || rate != old_rate) {
int j;
old_format = format;
old_rate = rate;
// Wait for the last song to finish playing
do {
alutSleep((ALfloat)0.01);
alGetSourcei(source, AL_SOURCE_STATE, &state);
} while(alGetError() == AL_NO_ERROR && state == AL_PLAYING);
// Rewind the source position and clear the buffer queue
alSourceRewind(source);
alSourcei(source, AL_BUFFER, 0);
// Fill and queue all NUM_BUFFERS buffers up front
for(j = 0;j < NUM_BUFFERS;j++) {
// Make sure we get some data to give to the buffer
count = getAVAudioData(streamA, data, BUFFER_SIZE);
if(count <= 0) return false;
// Buffer the data with OpenAL and queue the buffer onto the source
alBufferData(buffers[j], format, data, count, rate);
alSourceQueueBuffers(source, 1, &buffers[j]);
}
if (alGetError() != AL_NO_ERROR) {
closeAVFile(file);
fprintf(stderr, "Error buffering initial data...\n");
return false;
}
// Now start playback!
alSourcePlay(source);
if (alGetError() != AL_NO_ERROR) {
closeAVFile(file);
fprintf(stderr, "Error starting playback...\n");
return false;
}
} else {
// When skipping the initial load of a file (because the previous
// one is using the same exact format), set the base time to the
// negative of the queued buffers. This is so the timing will be
// from the beginning of this file, which won't start playing until
// the next buffer to get queued does */
basetime = -NUM_BUFFERS;
}
#endif
//fprintf(stderr, "\rPlaying %s (%d-bit, %d channels, %dhz)\n", sFileName.c_str(), bits, channels, rate);
return true;
} /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// dump the frame to texture //////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Copy the converted RGB frame into the Irrlicht texture, creating (or, after
// a resolution change, re-creating) the image/texture pair on demand.
// @param pFrame     RGB32 frame whose data[0] plane holds width*height pixels
// @param width/height output dimensions
// @param needResize true when refresh() detected a resolution change
// @return always true (kept for interface compatibility)
bool cVideoPlayer::DumpFrame(AVFrame *pFrame, int width, int height, bool needResize) {
	// NOTE(review): function-local static shared across all instances — only
	// the first player ever takes the creation path.
	static char first_time = 1;
	if (first_time) {
		// wrap the FrameRGB pixel buffer (ownForeignMemory = true: no copy)
		CurrentImage = IrrVideoDriver->createImageFromData(irr::video::ECF_A8R8G8B8,
			irr::core::dimension2d<irr::s32>(width, height),
			pFrame->data[0],
			true);
		first_time = 0;
		CurrentTexture = IrrVideoDriver->addTexture("movie", CurrentImage);
	}
	if (needResize) {
		IrrVideoDriver->removeTexture(CurrentTexture);
		// Fix: release the previous IImage before overwriting the pointer —
		// the old code leaked one image per resolution change.
		if (CurrentImage != NULL)
			CurrentImage->drop();
		CurrentImage = IrrVideoDriver->createImageFromData(irr::video::ECF_A8R8G8B8,
			irr::core::dimension2d<irr::s32>(width, height),
			pFrame->data[0],
			true);
		CurrentTexture = IrrVideoDriver->addTexture("movie", CurrentImage);
	}
	// blit image -> texture in one shot instead of the old per-pixel loop
	// (both sides are 32-bit linear buffers of width*height pixels)
	p = (s32*)CurrentTexture->lock();
	pimage = (s32*)CurrentImage->lock();
	memcpy(p, pimage, (size_t)width * (size_t)height * sizeof(s32));
	CurrentTexture->unlock();
	CurrentImage->unlock();
	return true;
}
// internal open file /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Open a media file and probe its stream info. Returns a new handle, or NULL
// on any failure (allocation, open, or probe).
FilePtr cVideoPlayer::openAVFile(const char *fname) {
	static int done = 0;
	// one-time ffmpeg global setup: register codecs, silence info/warn spam
	if (!done) {
		av_register_all();
		av_log_set_level(AV_LOG_ERROR);
	}
	done = 1;
	FilePtr handle = (FilePtr)calloc(1, sizeof(*handle));
	if (handle != NULL) {
		if (av_open_input_file(&handle->FmtCtx, fname, NULL, 0, NULL) == 0) {
			// Stream headers alone aren't always enough (e.g. system MPEG
			// streams), so probe the container for stream information too.
			if (av_find_stream_info(handle->FmtCtx) >= 0)
				return handle;
			av_close_input_file(handle->FmtCtx);
		}
	}
	free(handle); // free(NULL) is a no-op, so the alloc-failure path is fine
	return NULL;
}
// internal close file ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Tear down a file handle: close every opened stream's codec, release the
// per-stream buffers, then close the container itself. Safe on NULL.
void cVideoPlayer::closeAVFile(FilePtr file) {
	if (file == NULL)
		return;
	for (size_t idx = 0; idx < file->StreamsSize; ++idx) {
		StreamPtr s = file->Streams[idx];
		avcodec_close(s->CodecCtx);
		free(s->Data);
		free(s->DecodedData);
		free(s);
	}
	free(file->Streams);
	av_close_input_file(file->FmtCtx);
	free(file);
}
// internal find the relevent streams /////////////////////////////////////////////////////////////////////////////////////////////////////
// Locate the streamnum'th AUDIO stream in the file, open its decoder and
// allocate its PCM output buffer. Returns a cached handle if this stream was
// opened before, a newly registered handle otherwise, or NULL on failure.
StreamPtr cVideoPlayer::getAVAudioStream(FilePtr file, int streamnum) {
unsigned int i;
if (!file) return NULL;
for(i = 0;i < file->FmtCtx->nb_streams;i++) {
if (file->FmtCtx->streams[i]->codec->codec_type != CODEC_TYPE_AUDIO) continue;
if (streamnum == 0) {
StreamPtr stream;
AVCodec *codec;
void *temp;
size_t j;
// Found the requested stream. Check if a handle to this stream
// already exists and return it if it does
for(j = 0;j < file->StreamsSize;j++) {
if (file->Streams[j]->StreamIdx == (int)i) return file->Streams[j];
}
// Doesn't yet exist. Now allocate a new stream object and fill in its info
stream = (StreamPtr)calloc(1, sizeof(*stream));
if (!stream) return NULL;
stream->parent = file;
stream->CodecCtx = file->FmtCtx->streams[i]->codec;
stream->StreamIdx = i;
// Try to find the codec for the given codec ID, and open it
codec = avcodec_find_decoder(stream->CodecCtx->codec_id);
if (!codec || avcodec_open(stream->CodecCtx, codec) < 0) {
free(stream);
return NULL;
}
// Allocate space for the decoded data to be stored in before it gets passed to the app
stream->DecodedData = (char *)malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
if (!stream->DecodedData) {
avcodec_close(stream->CodecCtx);
free(stream);
return NULL;
}
// Append the new stream object to the stream list. The original
// pointer will remain valid if realloc fails, so we need to use
// another pointer to watch for errors and not leak memory
temp = realloc(file->Streams, (file->StreamsSize+1) * sizeof(*file->Streams));
if (!temp) {
avcodec_close(stream->CodecCtx);
free(stream->DecodedData);
free(stream);
return NULL;
}
file->Streams = (StreamPtr*)temp;
file->Streams[file->StreamsSize++] = stream;
return stream;
}
// not the requested one yet — count down to the wanted audio stream
streamnum--;
}
return NULL;
}
// Locate the streamnum'th VIDEO stream in the file, open its decoder, record
// the frame rate and allocate the decode/convert frame buffers. Returns a
// cached handle if this stream was opened before, a newly registered handle
// otherwise, or NULL on failure.
StreamPtr cVideoPlayer::getAVVideoStream(FilePtr file, int streamnum) {
	unsigned int i;
	if (!file) return NULL;
	for (i = 0; i < file->FmtCtx->nb_streams; i++) {
		if (file->FmtCtx->streams[i]->codec->codec_type != CODEC_TYPE_VIDEO) continue;
		if (streamnum == 0) {
			StreamPtr stream;
			AVCodec *codec;
			void *temp;
			size_t j;
			// Found the requested stream. Check if a handle to this stream
			// already exists and return it if it does
			for (j = 0; j < file->StreamsSize; j++) {
				if (file->Streams[j]->StreamIdx == (int)i) return file->Streams[j];
			}
			// Doesn't yet exist. Allocate a new stream object and fill it in
			stream = (StreamPtr)calloc(1, sizeof(*stream));
			if (!stream) return NULL;
			stream->parent = file;
			stream->CodecCtx = file->FmtCtx->streams[i]->codec;
			stream->StreamIdx = i;
			// Find the decoder. Fix: the old code read codec->capabilities
			// BEFORE the NULL check, crashing on files with an unknown codec.
			codec = avcodec_find_decoder(stream->CodecCtx->codec_id);
			if (!codec) {
				free(stream);
				return NULL;
			}
			// truncated-bitstream codecs must be flagged before avcodec_open
			if (codec->capabilities & CODEC_CAP_TRUNCATED)
				stream->CodecCtx->flags |= CODEC_FLAG_TRUNCATED;
			if (avcodec_open(stream->CodecCtx, codec) < 0) {
				free(stream);
				return NULL;
			}
			// record the movie frame rate — drives frame pacing in refresh()
			framerate = (float)file->FmtCtx->streams[i]->r_frame_rate.num;
			SecondsPerFrame = (double)file->FmtCtx->streams[i]->r_frame_rate.den / file->FmtCtx->streams[i]->r_frame_rate.num;
			// scratch frames: native-format decode target + RGB convert target
			Frame = avcodec_alloc_frame();
			FrameRGB = avcodec_alloc_frame();
			if (FrameRGB == NULL) return NULL;
			NumBytes = avpicture_get_size(PIX_FMT_RGB32, desiredW, desiredH);
			Buffer = new uint8_t[NumBytes];
			avpicture_fill((AVPicture *)FrameRGB, Buffer, PIX_FMT_RGB32, desiredW, desiredH);
			// Append the new stream object to the stream list via a temp
			// pointer so a failed realloc neither leaks nor loses the array
			temp = realloc(file->Streams, (file->StreamsSize + 1) * sizeof(*file->Streams));
			if (!temp) {
				avcodec_close(stream->CodecCtx);
				free(stream->DecodedData); // NULL for video streams; harmless
				free(stream);
				return NULL;
			}
			file->Streams = (StreamPtr*)temp;
			file->Streams[file->StreamsSize++] = stream;
			return stream;
		}
		// not the requested one yet — count down to the wanted video stream
		streamnum--;
	}
	return NULL;
}
// internal grab audio stream bits etc ////////////////////////////////////////////////////////////////////////////////////////////////////
// Report sample rate / channel count / bit depth for an audio stream. Bit
// depth is always 16 because decoding goes through avcodec_decode_audio2,
// which emits int16 samples. Any output pointer may be NULL to skip it.
// @return 0 on success, 1 if stream is NULL or not an audio stream.
int cVideoPlayer::getAVAudioInfo(StreamPtr stream, int *rate, int *channels, int *bits) {
	if (stream == NULL || stream->CodecCtx->codec_type != CODEC_TYPE_AUDIO)
		return 1;
	if (rate != NULL)
		*rate = stream->CodecCtx->sample_rate;
	if (channels != NULL)
		*channels = stream->CodecCtx->channels;
	if (bits != NULL)
		*bits = 16;
	return 0;
}
// internal get next packet ///////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Demux packets until one for stream `streamidx` (the audio stream) has been
// queued on its handle. Video packets encountered along the way are decoded
// immediately and their frames pushed onto Frame_Buffer.
// @return true when an audio packet was queued, false at end of file.
//
// Fixes vs. the original:
//  * pFrame was av_free'd at the bottom of every outer iteration but reused
//    on the next one (use-after-free), and leaked on the return-true path —
//    it is now freed exactly once at each exit.
//  * a decode error returned a negative byte count that was SUBTRACTED from
//    bytesRemaining, growing it and spinning forever — now we bail out of
//    the packet on error.
//  * the needless function statics are now locals (reentrant per call).
bool cVideoPlayer::getNextPacket(FilePtr file, int streamidx) {
	AVPacket packet;
	AVFrame *pFrame = avcodec_alloc_frame();
	// read packets until we have an audio packet to return
	while (av_read_frame(file->FmtCtx, &packet) >= 0) {
		StreamPtr *iter = file->Streams;
		size_t i;
		// Check each stream the user has a handle for, looking for the one this packet belongs to
		for (i = 0; i < file->StreamsSize; i++, iter++) {
			if ((*iter)->StreamIdx != packet.stream_index)
				continue;
			if (packet.stream_index == streamidx) { // audio packet
				size_t idx = (*iter)->DataSize;
				// Grow the input data buffer as needed to hold the new
				// packet's data, plus the padding some ffmpeg codecs need
				// so they don't overread the allocated buffer
				if (idx + packet.size > (*iter)->DataSizeMax) {
					void *temp = realloc((*iter)->Data, idx + packet.size + FF_INPUT_BUFFER_PADDING_SIZE);
					if (!temp) break; // drop the packet on OOM and keep reading
					(*iter)->Data = (char *)temp;
					(*iter)->DataSizeMax = idx + packet.size;
				}
				// Queue the packet bytes and hand control back to the caller
				memcpy(&(*iter)->Data[idx], packet.data, packet.size);
				(*iter)->DataSize += packet.size;
				av_free_packet(&packet);
				av_free(pFrame);
				return true;
			} else { // video packet: decode its frames into the buffer now
				int bytesRemaining = packet.size;
				uint8_t *rawData = packet.data;
				int frameFinished = 0;
				while (bytesRemaining > 0) {
					int bytesDecoded = avcodec_decode_video((*iter)->CodecCtx, pFrame, &frameFinished, rawData, bytesRemaining);
					if (bytesDecoded < 0) {
						fprintf(stderr, "Error while decoding frame\n");
						break; // abandon this packet; keep demuxing
					}
					bytesRemaining -= bytesDecoded;
					rawData += bytesDecoded;
					if (frameFinished) { // a complete frame: buffer a shallow copy
						Frame_Buffer.push_back(*pFrame);
						av_free(pFrame);
						pFrame = avcodec_alloc_frame();
						frameFinished = 0;
					}
				}
			}
			break; // packet handled (or dropped); stop scanning streams
		}
		// Free the packet and look for another
		av_free_packet(&packet);
	}
	// end of file — nothing more to demux
	av_free(pFrame);
	return false;
}
// internal get audio data ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Decode up to `length` bytes of PCM from an audio stream into `data`,
// demuxing more packets as needed. Returns the number of bytes actually
// written (0 at end of stream or if `stream` isn't an audio stream).
int cVideoPlayer::getAVAudioData(StreamPtr stream, void *data, int length) {
int dec = 0;
if (!stream || stream->CodecCtx->codec_type != CODEC_TYPE_AUDIO) return 0;
while(dec < length) {
// If there's any pending decoded data, deal with it first
if (stream->DecodedDataSize > 0) {
// Get the amount of bytes remaining to be written,
// and clamp to the amount of decoded data we have
size_t rem = length-dec;
if (rem > stream->DecodedDataSize) rem = stream->DecodedDataSize;
// Copy the data to the app's buffer and increment
memcpy(data, stream->DecodedData, rem);
data = (char*)data + rem;
dec += rem;
// If there's any decoded data left, move it to the front of the
// buffer for next time
if (rem < stream->DecodedDataSize)
memmove(stream->DecodedData, &stream->DecodedData[rem], stream->DecodedDataSize - rem);
stream->DecodedDataSize -= rem;
}
// Check if we need to get more decoded data
if (stream->DecodedDataSize == 0) {
size_t insize;
int size;
int len;
insize = stream->DataSize;
if (insize == 0) {
getNextPacket(stream->parent, stream->StreamIdx);
// If DataSize didn't change, getNextPacket found no more input
// data for this stream — break and return what we have
if (insize == stream->DataSize) break;
insize = stream->DataSize;
// zero the codec's overread padding region past the valid bytes
memset(&stream->Data[insize], 0, FF_INPUT_BUFFER_PADDING_SIZE);
}
// Decode some data, and check for errors. A return of 0 with
// size == 0 means the decoder consumed nothing and produced
// nothing, so fetch another packet and retry
size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
while((len=avcodec_decode_audio2(stream->CodecCtx, (int16_t*)stream->DecodedData,
&size, (uint8_t*)stream->Data, insize)) == 0) {
if (size > 0) break;
getNextPacket(stream->parent, stream->StreamIdx);
if (insize == stream->DataSize) break;
insize = stream->DataSize;
memset(&stream->Data[insize], 0, FF_INPUT_BUFFER_PADDING_SIZE);
}
if (len < 0) break; // decode error — return what we have so far
if (len > 0) {
// If any input data is left, move it to the start of the
// buffer, and decrease the buffer size
size_t rem = insize-len;
if (rem) memmove(stream->Data, &stream->Data[len], rem);
stream->DataSize = rem;
}
// Set the output buffer size
stream->DecodedDataSize = size;
}
}
// Return the number of bytes we were able to get
return dec;
} /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// switch res /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Change the output resolution on the fly: reallocate the RGB pixel buffer
// and re-point FrameRGB's planes at it. refresh() notices the size change on
// the next frame and rebuilds its swscale context / texture accordingly.
void cVideoPlayer::changeResolution(int w, int h) {
	if (w == desiredW && h == desiredH)
		return; // nothing to do
	std::cout << "Changing resolution from ["<< desiredW << "x" << desiredH << "] to [" << w << "x" << h << "]" << std::endl;
	desiredW = w;
	desiredH = h;
	// swap the pixel storage for one sized to the new resolution
	delete [] Buffer;
	//av_free((AVPicture *)FrameRGB);
	NumBytes = avpicture_get_size(PIX_FMT_RGB32, desiredW, desiredH);
	Buffer = new uint8_t[NumBytes];
	// Assign appropriate parts of buffer to image planes in pFrameRGB
	avpicture_fill((AVPicture *)FrameRGB, Buffer, PIX_FMT_RGB32, desiredW, desiredH);
}
// write back the current texture(or frame if u prefer) ///////////////////////////////////////////////////////////////////////////////////
// Accessor for the texture holding the most recently decoded frame
// (NULL until the first frame has been dumped).
irr::video::ITexture* cVideoPlayer::getVideoTexture(void) {
	return this->CurrentTexture;
}
// return current frame rate //////////////////////////////////////////////////////////////////////////////////////////////////////////////
float cVideoPlayer::getFrameRate(void) {
return framerate;
} /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// destruct ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Destructor: release frame buffers, close the media file, then drain and
// tear down the OpenAL pipeline.
// Fixes vs. the original: streamV->CodecCtx was dereferenced with no NULL
// check (crash when open() never succeeded), and its avcodec_close duplicated
// the one closeAVFile() already performs for every registered stream — the
// explicit close is therefore dropped entirely.
cVideoPlayer::~cVideoPlayer() {
	// Free the RGB pixel storage and frame structs
	if (Buffer != NULL)
		delete [] Buffer;
	if (FrameRGB != NULL)
		av_free(FrameRGB);
	// Free the YUV frame
	if (Frame != NULL)
		av_free(Frame);
	// Close the file — this also avcodec_close()s every opened stream codec
	closeAVFile(file);
	fprintf(stderr, "\nDone.\n");
#ifdef SOUND_OPENAL
	// All data has been streamed in. Wait until the source stops playing it
	do {
		alutSleep((ALfloat)0.01);
		alGetSourcei(source, AL_SOURCE_STATE, &state);
	} while (alGetError() == AL_NO_ERROR && state == AL_PLAYING);
	// All files done. Delete the source and buffers, and close OpenAL
	alDeleteSources(1, &source);
	alDeleteBuffers(NUM_BUFFERS, buffers);
	alutExit();
	free(data);
#endif
}