Merge pull request #8867 from unknownbrackets/mpeg

Improve mpeg parsing / corruption issues
Henrik Rydgård 2016-08-06 18:14:17 +02:00 committed by GitHub
commit eac18489e6
14 changed files with 120 additions and 52 deletions

View File

@ -337,7 +337,6 @@ static void AnalyzeMpeg(u8 *buffer, MpegContext *ctx) {
// TODO: Does this make any sense?
ctx->mediaengine->loadStream(buffer, ctx->mpegOffset, 0);
}
ctx->mediaengine->setVideoDim();
}
// When used with scePsmf, some applications attempt to use sceMpegQueryStreamOffset

View File

@ -42,6 +42,9 @@ static const int PSMF_STREAM_SIZE_OFFSET = 0xC;
static const int PSMF_FIRST_TIMESTAMP_OFFSET = 0x54;
static const int PSMF_LAST_TIMESTAMP_OFFSET = 0x5A;
static const int PSMF_VIDEO_STREAM_ID = 0xE0;
static const int PSMF_AUDIO_STREAM_ID = 0xBD;
struct SceMpegAu {
s64_le pts; // presentation time stamp
s64_le dts; // decode time stamp
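
These constants index fixed fields in the PSMF header that precedes the MPEG stream data. All multi-byte fields are stored big-endian, and the two timestamps are 6-byte values in 90 kHz units. As a minimal sketch of reading them from a raw header buffer (the ReadU32BE/ReadTS48BE helpers are illustrative only; the real code uses u32_be/s32_be wrapper types and its own timestamp helper):

#include <cstdint>
#include <cstdio>

// Illustrative big-endian readers for the sketch below.
static uint32_t ReadU32BE(const uint8_t *p) {
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) | ((uint32_t)p[2] << 8) | p[3];
}
static int64_t ReadTS48BE(const uint8_t *p) {  // PSMF timestamps: 6 bytes, 90 kHz units.
	int64_t ts = 0;
	for (int i = 0; i < 6; ++i)
		ts = (ts << 8) | p[i];
	return ts;
}

// Sketch: the header fields AnalyzeMpeg reads, using the offsets above.
static void DumpPsmfHeader(const uint8_t *hdr) {
	uint32_t streamOffset = ReadU32BE(hdr + 0x8);                           // start of the MPEG data
	uint32_t streamSize   = ReadU32BE(hdr + PSMF_STREAM_SIZE_OFFSET);       // 0xC
	int64_t firstPts      = ReadTS48BE(hdr + PSMF_FIRST_TIMESTAMP_OFFSET);  // 0x54
	int64_t lastPts       = ReadTS48BE(hdr + PSMF_LAST_TIMESTAMP_OFFSET);   // 0x5A
	printf("offset=0x%x size=%u pts=[%lld, %lld]\n",
	       streamOffset, streamSize, (long long)firstPts, (long long)lastPts);
}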

View File

@ -33,8 +33,6 @@
#include <algorithm>
// "Go Sudoku" is a good way to test this code...
const int PSMF_VIDEO_STREAM_ID = 0xE0;
const int PSMF_AUDIO_STREAM_ID = 0xBD;
const int PSMF_AVC_STREAM = 0;
const int PSMF_ATRAC_STREAM = 1;
const int PSMF_PCM_STREAM = 2;
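
For context, the two stream-id constants identify elementary streams inside the PSMF mux, while the *_STREAM constants are the type codes scePsmf reports back. A rough sketch of how one maps to the other (the real parser also consults the stream table entry, e.g. to tell ATRAC3+ from PCM on private stream 0xBD):

// Sketch only: classify an MPEG stream id byte into a PSMF stream type.
static int ClassifyPsmfStream(int streamId) {
	if ((streamId & PSMF_VIDEO_STREAM_ID) == PSMF_VIDEO_STREAM_ID)
		return PSMF_AVC_STREAM;    // 0xE0-0xEF: video elementary streams
	if ((streamId & PSMF_AUDIO_STREAM_ID) == PSMF_AUDIO_STREAM_ID)
		return PSMF_ATRAC_STREAM;  // 0xBD: private stream 1, usually ATRAC3+
	return -1;                     // not a stream type PSMF playback uses
}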

View File

@ -147,6 +147,7 @@ MediaEngine::MediaEngine(): m_pdata(0) {
m_ringbuffersize = 0;
m_mpegheaderReadPos = 0;
m_mpegheaderSize = sizeof(m_mpegheader);
m_audioType = PSP_CODEC_AT3PLUS; // in movie, we use only AT3+ audio
}
@ -166,8 +167,8 @@ void MediaEngine::closeMedia() {
m_isVideoEnd = false;
}
void MediaEngine::DoState(PointerWrap &p){
auto s = p.Section("MediaEngine", 1, 3);
void MediaEngine::DoState(PointerWrap &p) {
auto s = p.Section("MediaEngine", 1, 5);
if (!s)
return;
@ -175,6 +176,16 @@ void MediaEngine::DoState(PointerWrap &p){
p.Do(m_audioStream);
p.DoArray(m_mpegheader, sizeof(m_mpegheader));
if (s >= 4) {
p.Do(m_mpegheaderSize);
} else {
m_mpegheaderSize = sizeof(m_mpegheader);
}
if (s >= 5) {
p.Do(m_mpegheaderReadPos);
} else {
m_mpegheaderReadPos = m_mpegheaderSize;
}
p.Do(m_ringbuffersize);
@ -188,8 +199,6 @@ void MediaEngine::DoState(PointerWrap &p){
u32 hasopencontext = false;
#endif
p.Do(hasopencontext);
if (hasopencontext && p.mode == p.MODE_READ)
openContext();
if (m_pdata)
m_pdata->DoState(p);
if (m_demux)
@ -203,6 +212,10 @@ void MediaEngine::DoState(PointerWrap &p){
p.Do(m_lastTimeStamp);
}
if (hasopencontext && p.mode == p.MODE_READ) {
openContext(true);
}
p.Do(m_isVideoEnd);
bool noAudioDataRemoved;
p.Do(noAudioDataRemoved);
@ -213,18 +226,14 @@ void MediaEngine::DoState(PointerWrap &p){
}
}
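
The DoState changes above follow the usual versioned-section pattern: bump the current version passed to p.Section(), serialize each new field only when the loaded state is new enough to contain it, and synthesize a sensible default otherwise. Reduced to a template (component and member names hypothetical):

void SomeComponent::DoState(PointerWrap &p) {
	// Minimum readable version 1, current version 5.
	auto s = p.Section("SomeComponent", 1, 5);
	if (!s)
		return;

	p.Do(alwaysPresent_);

	if (s >= 4) {
		p.Do(headerSize_);                // field added in version 4
	} else {
		headerSize_ = sizeof(header_);    // older state: assume a full header
	}
	if (s >= 5) {
		p.Do(headerReadPos_);             // field added in version 5
	} else {
		headerReadPos_ = headerSize_;     // older state: header already consumed
	}
}
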
int _MpegReadbuffer(void *opaque, uint8_t *buf, int buf_size)
{
static int MpegReadbuffer(void *opaque, uint8_t *buf, int buf_size) {
MediaEngine *mpeg = (MediaEngine *)opaque;
int size = buf_size;
const int mpegheaderSize = sizeof(mpeg->m_mpegheader);
if (mpeg->m_mpegheaderReadPos < mpegheaderSize) {
size = std::min(buf_size, mpegheaderSize - mpeg->m_mpegheaderReadPos);
if (mpeg->m_mpegheaderReadPos < mpeg->m_mpegheaderSize) {
size = std::min(buf_size, mpeg->m_mpegheaderSize - mpeg->m_mpegheaderReadPos);
memcpy(buf, mpeg->m_mpegheader + mpeg->m_mpegheaderReadPos, size);
mpeg->m_mpegheaderReadPos += size;
} else if (mpeg->m_mpegheaderReadPos == mpegheaderSize) {
return 0;
} else {
size = mpeg->m_pdata->pop_front(buf, buf_size);
if (size > 0)
@ -233,28 +242,73 @@ int _MpegReadbuffer(void *opaque, uint8_t *buf, int buf_size)
return size;
}
bool MediaEngine::openContext() {
bool MediaEngine::SetupStreams() {
#ifdef USE_FFMPEG
const u32 magic = *(u32_le *)&m_mpegheader[0];
if (magic != PSMF_MAGIC) {
WARN_LOG_REPORT(ME, "Could not setup streams, bad magic: %08x", magic);
return false;
}
int numStreams = *(u16_be *)&m_mpegheader[0x80];
if (numStreams <= 0 || numStreams > 8) {
// Looks crazy. Let's bail out and let FFmpeg handle it.
WARN_LOG_REPORT(ME, "Could not setup streams, unexpected stream count: %d", numStreams);
return false;
}
// Looking good. Let's add those streams.
const AVCodec *h264_codec = avcodec_find_decoder(AV_CODEC_ID_H264);
for (int i = 0; i < numStreams; i++) {
const u8 *const currentStreamAddr = m_mpegheader + 0x82 + i * 16;
int streamId = currentStreamAddr[0];
// We only set video streams. We demux the audio stream separately.
if ((streamId & PSMF_VIDEO_STREAM_ID) == PSMF_VIDEO_STREAM_ID) {
AVStream *stream = avformat_new_stream(m_pFormatCtx, h264_codec);
stream->id = 0x00000100 | streamId;
stream->request_probe = 0;
stream->need_parsing = AVSTREAM_PARSE_FULL;
// We could set the width here, but we don't need to.
}
}
#endif
return true;
}
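
SetupStreams relies on the fixed layout of the PSMF stream table: a big-endian 16-bit stream count at offset 0x80, then one 16-byte entry per stream starting at 0x82 whose first byte is the MPEG stream id. A standalone sketch of that walk, with the same sanity bounds as above:

#include <cstdint>
#include <vector>

// Sketch: list the MPEG stream ids declared by a PSMF header's stream table.
static std::vector<int> ListPsmfStreamIds(const uint8_t *hdr, size_t hdrSize) {
	std::vector<int> ids;
	if (hdrSize < 0x82)
		return ids;
	int numStreams = (hdr[0x80] << 8) | hdr[0x81];      // u16_be at 0x80
	if (numStreams <= 0 || numStreams > 8)
		return ids;                                     // implausible count, bail out
	for (int i = 0; i < numStreams; ++i) {
		if ((size_t)(0x82 + (i + 1) * 16) > hdrSize)
			break;
		ids.push_back(hdr[0x82 + i * 16]);              // 0xE0.. video, 0xBD private/audio
	}
	return ids;
}
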
bool MediaEngine::openContext(bool keepReadPos) {
#ifdef USE_FFMPEG
InitFFmpeg();
if (m_pFormatCtx || !m_pdata)
return false;
m_mpegheaderReadPos = 0;
if (!keepReadPos) {
m_mpegheaderReadPos = 0;
}
m_decodingsize = 0;
u8* tempbuf = (u8*)av_malloc(m_bufSize);
m_bufSize = std::max(m_bufSize, m_mpegheaderSize);
u8 *tempbuf = (u8*)av_malloc(m_bufSize);
m_pFormatCtx = avformat_alloc_context();
m_pIOContext = avio_alloc_context(tempbuf, m_bufSize, 0, (void*)this, _MpegReadbuffer, NULL, 0);
m_pIOContext = avio_alloc_context(tempbuf, m_bufSize, 0, (void*)this, &MpegReadbuffer, nullptr, nullptr);
m_pFormatCtx->pb = m_pIOContext;
// Open video file
if (avformat_open_input((AVFormatContext**)&m_pFormatCtx, NULL, NULL, NULL) != 0)
AVDictionary *open_opt = nullptr;
av_dict_set_int(&open_opt, "probesize", m_mpegheaderSize, 0);
if (avformat_open_input((AVFormatContext**)&m_pFormatCtx, nullptr, nullptr, &open_opt) != 0) {
av_dict_free(&open_opt);
return false;
}
av_dict_free(&open_opt);
if (avformat_find_stream_info(m_pFormatCtx, NULL) < 0) {
closeContext();
return false;
if (!SetupStreams()) {
// Fallback to old behavior.
if (avformat_find_stream_info(m_pFormatCtx, NULL) < 0) {
closeContext();
return false;
}
}
if (m_videoStream >= (int)m_pFormatCtx->nb_streams) {
@ -280,8 +334,6 @@ bool MediaEngine::openContext() {
setVideoDim();
m_audioContext = new SimpleAudio(m_audioType, 44100, 2);
m_isVideoEnd = false;
m_mpegheaderReadPos++;
av_seek_frame(m_pFormatCtx, m_videoStream, 0, 0);
#endif // USE_FFMPEG
return true;
}
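
Everything here goes through a custom AVIOContext, so FFmpeg demuxes from the captured header plus the packet queue instead of a file, and the probesize option stops it from probing past the data we actually buffered. A self-contained sketch of the same pattern against a plain memory buffer (cleanup and error handling trimmed; this is not the PPSSPP code itself):

extern "C" {
#include <libavformat/avformat.h>
}
#include <algorithm>
#include <cstring>

struct MemSource {
	const uint8_t *data;
	size_t size;
	size_t pos;
};

// Read callback: hand FFmpeg the next chunk of the memory buffer.
static int ReadMem(void *opaque, uint8_t *buf, int buf_size) {
	MemSource *src = (MemSource *)opaque;
	int n = (int)std::min((size_t)buf_size, src->size - src->pos);
	if (n <= 0)
		return AVERROR_EOF;
	memcpy(buf, src->data + src->pos, n);
	src->pos += n;
	return n;
}

static AVFormatContext *OpenFromMemory(MemSource *src) {
	const int bufSize = 32768;
	uint8_t *ioBuf = (uint8_t *)av_malloc(bufSize);
	AVIOContext *avio = avio_alloc_context(ioBuf, bufSize, 0, src, &ReadMem, nullptr, nullptr);
	AVFormatContext *fmt = avformat_alloc_context();
	fmt->pb = avio;

	AVDictionary *opts = nullptr;
	av_dict_set_int(&opts, "probesize", (int64_t)src->size, 0);  // don't probe past our data
	if (avformat_open_input(&fmt, nullptr, nullptr, &opts) != 0)
		fmt = nullptr;  // avformat_open_input frees the context on failure
	av_dict_free(&opts);
	return fmt;
}
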
@ -343,9 +395,8 @@ int MediaEngine::addStreamData(const u8 *buffer, int addSize) {
}
#ifdef USE_FFMPEG
if (!m_pFormatCtx && m_pdata->getQueueSize() >= 2048) {
m_pdata->get_front(m_mpegheader, sizeof(m_mpegheader));
int mpegoffset = (int)(*(s32_be*)(m_mpegheader + 8));
m_pdata->pop_front(0, mpegoffset);
m_mpegheaderSize = m_pdata->get_front(m_mpegheader, sizeof(m_mpegheader));
m_pdata->pop_front(0, m_mpegheaderSize);
openContext();
}
#endif // USE_FFMPEG
@ -408,8 +459,7 @@ bool MediaEngine::setVideoStream(int streamNum, bool force) {
}
// Open codec
AVDictionary *optionsDict = 0;
if (avcodec_open2(m_pCodecCtx, pCodec, &optionsDict) < 0) {
if (avcodec_open2(m_pCodecCtx, pCodec, nullptr) < 0) {
return false; // Could not open codec
}
m_pCodecCtxs[streamNum] = m_pCodecCtx;
@ -441,11 +491,19 @@ bool MediaEngine::setVideoDim(int width, int height)
}
// Allocate video frame
m_pFrame = av_frame_alloc();
if (!m_pFrame) {
m_pFrame = av_frame_alloc();
}
sws_freeContext(m_sws_ctx);
m_sws_ctx = NULL;
m_sws_fmt = -1;
if (m_desWidth == 0 || m_desHeight == 0) {
// Can't setup SWS yet, so stop for now.
return false;
}
updateSwsFormat(GE_CMODE_32BIT_ABGR8888);
// Allocate video frame for RGB24
@ -513,14 +571,9 @@ bool MediaEngine::stepVideo(int videoPixelMode, bool skipFrame) {
return false;
if (!m_pCodecCtx)
return false;
if ((!m_pFrame)||(!m_pFrameRGB))
if (!m_pFrame)
return false;
updateSwsFormat(videoPixelMode);
// TODO: Technically we could set this to frameWidth instead of m_desWidth for better perf.
// Update the linesize for the new format too. We started with the largest size, so it should fit.
m_pFrameRGB->linesize[0] = getPixelFormatBytes(videoPixelMode) * m_desWidth;
AVPacket packet;
av_init_packet(&packet);
int frameFinished;
@ -541,7 +594,15 @@ bool MediaEngine::stepVideo(int videoPixelMode, bool skipFrame) {
int result = avcodec_decode_video2(m_pCodecCtx, m_pFrame, &frameFinished, &packet);
if (frameFinished) {
if (!skipFrame) {
if (!m_pFrameRGB) {
setVideoDim();
}
if (m_pFrameRGB && !skipFrame) {
updateSwsFormat(videoPixelMode);
// TODO: Technically we could set this to frameWidth instead of m_desWidth for better perf.
// Update the linesize for the new format too. We started with the largest size, so it should fit.
m_pFrameRGB->linesize[0] = getPixelFormatBytes(videoPixelMode) * m_desWidth;
sws_scale(m_sws_ctx, m_pFrame->data, m_pFrame->linesize, 0,
m_pCodecCtx->height, m_pFrameRGB->data, m_pFrameRGB->linesize);
}
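
Allocating m_pFrameRGB lazily means the output stride has to be recomputed on every decoded frame, since the requested pixel mode can change between decode calls; the stride is simply the destination width times the bytes per pixel of that mode. A sketch of the lookup the existing getPixelFormatBytes helper performs:

// 16-bit PSP modes pack a pixel into 2 bytes, 32-bit ABGR8888 into 4.
static int PixelFormatBytes(int videoPixelMode) {
	switch (videoPixelMode) {
	case GE_CMODE_16BIT_BGR5650:
	case GE_CMODE_16BIT_ABGR5551:
	case GE_CMODE_16BIT_ABGR4444:
		return 2;
	case GE_CMODE_32BIT_ABGR8888:
	default:
		return 4;
	}
}
// For a 480-pixel-wide target in ABGR8888: linesize = 480 * 4 = 1920 bytes.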

View File

@ -60,7 +60,7 @@ public:
bool loadStream(const u8 *buffer, int readSize, int RingbufferSize);
bool reloadStream();
// open the mpeg context
bool openContext();
bool openContext(bool keepReadPos = false);
void closeContext();
// Returns number of packets actually added. I guess the buffer might be full.
@ -81,7 +81,6 @@ public:
int xpos, int ypos, int width, int height);
int getAudioSamples(u32 bufferPtr);
bool setVideoDim(int width = 0, int height = 0);
s64 getVideoTimeStamp();
s64 getAudioTimeStamp();
s64 getLastTimeStamp();
@ -94,6 +93,8 @@ public:
void DoState(PointerWrap &p);
private:
bool SetupStreams();
bool setVideoDim(int width = 0, int height = 0);
void updateSwsFormat(int videoPixelMode);
int getNextAudioFrame(u8 **buf, int *headerCode1, int *headerCode2);
@ -135,6 +136,7 @@ public: // TODO: Very little of this below should be public.
int m_ringbuffersize;
u8 m_mpegheader[0x10000]; // TODO: Allocate separately
int m_mpegheaderReadPos;
int m_mpegheaderSize;
// used for audio type
int m_audioType;

View File

@ -332,11 +332,11 @@ int MpegDemux::getNextAudioFrame(u8 **buf, int *headerCode1, int *headerCode2, s
bool MpegDemux::hasNextAudioFrame(int *gotsizeOut, int *frameSizeOut, int *headerCode1, int *headerCode2)
{
int gotsize = m_audioStream.get_front(m_audioFrame, 0x2000);
if (gotsize == 0 || !isHeader(m_audioFrame, 0))
if (gotsize < 4 || !isHeader(m_audioFrame, 0))
return false;
u8 code1 = m_audioFrame[2];
u8 code2 = m_audioFrame[3];
int frameSize = (((code1 & 0x03) << 8) | ((code2 & 0xFF) * 8)) + 0x10;
int frameSize = (((code1 & 0x03) << 8) | (code2 * 8)) + 0x10;
if (frameSize > gotsize)
return false;
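
The frame-size formula derives the ATRAC3+ frame length from the third and fourth header bytes, and the tightened gotsize < 4 check guarantees those bytes exist before they are read (the old gotsize == 0 check did not). As a standalone arithmetic sketch:

// Mirrors the demuxer's computation: low two bits of byte 2 and byte 3 (times 8)
// give the payload size, plus a fixed 0x10-byte header.
static int AudioFrameSize(uint8_t code1, uint8_t code2) {
	return (((code1 & 0x03) << 8) | (code2 * 8)) + 0x10;
}
// e.g. code1 = 0x01, code2 = 0x10  ->  (0x100 | 0x80) + 0x10 = 0x190 (400 bytes)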

View File

@ -953,6 +953,11 @@ void FramebufferManagerCommon::NotifyBlockTransferAfter(u32 dstBasePtr, int dstS
const u8 *srcBase = Memory::GetPointerUnchecked(srcBasePtr) + (srcX + srcY * srcStride) * bpp;
int dstBpp = dstBuffer->format == GE_FORMAT_8888 ? 4 : 2;
float dstXFactor = (float)bpp / dstBpp;
if (dstWidth > dstBuffer->width || dstHeight > dstBuffer->height) {
// The buffer isn't big enough, and we have a clear hint of size. Resize.
// This happens in Valkyrie Profile when uploading video at the ending.
ResizeFramebufFBO(dstBuffer, dstWidth, dstHeight, false, true);
}
DrawPixels(dstBuffer, static_cast<int>(dstX * dstXFactor), dstY, srcBase, dstBuffer->format, static_cast<int>(srcStride * dstXFactor), static_cast<int>(dstWidth * dstXFactor), dstHeight);
SetColorUpdated(dstBuffer, skipDrawReason);
RebindFramebuffer();
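
dstXFactor converts X coordinates and widths from the block transfer's bytes-per-pixel into pixels of the destination framebuffer format, which matters when a 16-bit transfer lands in a 32-bit target (or vice versa); the new resize call passes skipCopy = true because DrawPixels is about to overwrite the whole destination anyway. A worked sketch of the scaling:

// Sketch: rescale a block-transfer x/stride/width from transfer bpp to destination pixels.
struct ScaledRect { int x, stride, width; };

static ScaledRect ScaleToDstFormat(int x, int stride, int width, int transferBpp, int dstBpp) {
	float f = (float)transferBpp / (float)dstBpp;
	return { (int)(x * f), (int)(stride * f), (int)(width * f) };
}
// A 2 bpp transfer into a GE_FORMAT_8888 (4 bpp) target halves the X values:
// ScaleToDstFormat(64, 512, 480, 2, 4) -> { 32, 256, 240 }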

View File

@ -245,7 +245,7 @@ protected:
static bool MaskedEqual(u32 addr1, u32 addr2);
virtual void DestroyFramebuf(VirtualFramebuffer *vfb) = 0;
virtual void ResizeFramebufFBO(VirtualFramebuffer *vfb, u16 w, u16 h, bool force = false) = 0;
virtual void ResizeFramebufFBO(VirtualFramebuffer *vfb, u16 w, u16 h, bool force = false, bool skipCopy = false) = 0;
virtual void NotifyRenderFramebufferCreated(VirtualFramebuffer *vfb) = 0;
virtual void NotifyRenderFramebufferSwitched(VirtualFramebuffer *prevVfb, VirtualFramebuffer *vfb, bool isClearingDepth) = 0;
virtual void NotifyRenderFramebufferUpdated(VirtualFramebuffer *vfb, bool vfbFormatChanged) = 0;

View File

@ -302,7 +302,7 @@ namespace DX9 {
}
}
void FramebufferManagerDX9::ResizeFramebufFBO(VirtualFramebuffer *vfb, u16 w, u16 h, bool force) {
void FramebufferManagerDX9::ResizeFramebufFBO(VirtualFramebuffer *vfb, u16 w, u16 h, bool force, bool skipCopy) {
VirtualFramebuffer old = *vfb;
if (force) {
@ -362,7 +362,7 @@ namespace DX9 {
if (vfb->fbo) {
fbo_bind_as_render_target(vfb->fbo_dx9);
ClearBuffer();
if (!g_Config.bDisableSlowFramebufEffects) {
if (!skipCopy && !g_Config.bDisableSlowFramebufEffects) {
BlitFramebuffer(vfb, 0, 0, &old, 0, 0, std::min(vfb->bufferWidth, vfb->width), std::min(vfb->height, vfb->bufferHeight), 0);
}
}

View File

@ -79,8 +79,8 @@ public:
virtual bool NotifyStencilUpload(u32 addr, int size, bool skipZero = false) override;
void DestroyFramebuf(VirtualFramebuffer *vfb);
void ResizeFramebufFBO(VirtualFramebuffer *vfb, u16 w, u16 h, bool force = false);
void DestroyFramebuf(VirtualFramebuffer *vfb) override;
void ResizeFramebufFBO(VirtualFramebuffer *vfb, u16 w, u16 h, bool force = false, bool skipCopy = false) override;
bool GetCurrentFramebuffer(GPUDebugBuffer &buffer, int maxRes);
bool GetCurrentDepthbuffer(GPUDebugBuffer &buffer);

View File

@ -557,7 +557,7 @@ void FramebufferManager::RebindFramebuffer() {
glstate.viewport.restore();
}
void FramebufferManager::ResizeFramebufFBO(VirtualFramebuffer *vfb, u16 w, u16 h, bool force) {
void FramebufferManager::ResizeFramebufFBO(VirtualFramebuffer *vfb, u16 w, u16 h, bool force, bool skipCopy) {
VirtualFramebuffer old = *vfb;
if (force) {
@ -617,7 +617,7 @@ void FramebufferManager::ResizeFramebufFBO(VirtualFramebuffer *vfb, u16 w, u16 h
if (vfb->fbo) {
fbo_bind_as_render_target(vfb->fbo);
ClearBuffer();
if (!g_Config.bDisableSlowFramebufEffects) {
if (!skipCopy && !g_Config.bDisableSlowFramebufEffects) {
BlitFramebuffer(vfb, 0, 0, &old, 0, 0, std::min(vfb->bufferWidth, vfb->width), std::min(vfb->height, vfb->bufferHeight), 0);
}
}

View File

@ -106,7 +106,7 @@ public:
bool NotifyStencilUpload(u32 addr, int size, bool skipZero = false) override;
void DestroyFramebuf(VirtualFramebuffer *vfb) override;
void ResizeFramebufFBO(VirtualFramebuffer *vfb, u16 w, u16 h, bool force = false) override;
void ResizeFramebufFBO(VirtualFramebuffer *vfb, u16 w, u16 h, bool force = false, bool skipCopy = false) override;
bool GetFramebuffer(u32 fb_address, int fb_stride, GEBufferFormat format, GPUDebugBuffer &buffer, int maxRes);
bool GetDepthbuffer(u32 fb_address, int fb_stride, u32 z_address, int z_stride, GPUDebugBuffer &buffer);

View File

@ -533,7 +533,7 @@ void FramebufferManagerVulkan::RebindFramebuffer() {
// Switch command buffer?
}
void FramebufferManagerVulkan::ResizeFramebufFBO(VirtualFramebuffer *vfb, u16 w, u16 h, bool force) {
void FramebufferManagerVulkan::ResizeFramebufFBO(VirtualFramebuffer *vfb, u16 w, u16 h, bool force, bool skipCopy) {
return;
/*
@ -596,7 +596,7 @@ void FramebufferManagerVulkan::ResizeFramebufFBO(VirtualFramebuffer *vfb, u16 w,
if (vfb->fbo_vk) {
/// fbo_bind_as_render_target(vfb->fbo_vk);
ClearBuffer();
if (!g_Config.bDisableSlowFramebufEffects) {
if (!skipCopy && !g_Config.bDisableSlowFramebufEffects) {
BlitFramebuffer(vfb, 0, 0, &old, 0, 0, std::min(vfb->bufferWidth, vfb->width), std::min(vfb->height, vfb->bufferHeight), 0);
}
}

View File

@ -122,7 +122,7 @@ public:
bool NotifyStencilUpload(u32 addr, int size, bool skipZero = false) override;
void DestroyFramebuf(VirtualFramebuffer *vfb) override;
void ResizeFramebufFBO(VirtualFramebuffer *vfb, u16 w, u16 h, bool force = false) override;
void ResizeFramebufFBO(VirtualFramebuffer *vfb, u16 w, u16 h, bool force = false, bool skipCopy = false) override;
bool GetFramebuffer(u32 fb_address, int fb_stride, GEBufferFormat format, GPUDebugBuffer &buffer);
bool GetDepthbuffer(u32 fb_address, int fb_stride, u32 z_address, int z_stride, GPUDebugBuffer &buffer);