AvPlayer HLE (#58)

* Adds some libSceAvPlayer dummy functions; ps4_sceAvPlayerAddSource caches files to the 'avplayer_dump' directory

* It's better for languageCode to be an array of chars

* SysLogPrefix

* us -> ms

* Adds ffmpeg headers

* Fix compilation

* sceAvPlayerInitEx

* spinlock, proper close ffmpeg stuff

* Refactor + fix

* NextPacket

* ReceiveAudio

* ReceiveVideo

* audio works

* Minor fix

* Minor

* accurate GetTimeInUs

* sceAvPlayerPostInit and sceAvPlayerStop

* Is not GPU Addr err

* Fixing Structure Alignments

* The original timeStamp is listed in ms in the documentation

* Forgotten "not" and disable Exit(False); (Seems to work, need to test)

* Should check patch folder first

* Minor

* test CI

* use parse_filename

* _sceAvPlayerInit, _sceAvPlayerInitEx

* ps4_sceAvPlayerPostInit, _sceAvPlayerAddSource

* fix types

* _sceAvPlayerGetAudioData

* _sceAvPlayerGetVideoDataEx

* _sceAvPlayerStop, _sceAvPlayerClose

* Removed outdated comments + long name

* Use MemChunk instead of a simple Pointer

* fix init value

* convert to utf8

Co-authored-by: Pavel <68122101+red-prig@users.noreply.github.com>
This commit is contained in:
Kagamma 2023-01-17 20:32:47 +07:00 committed by GitHub
parent 9e3d998431
commit 594cd56cb6
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 25014 additions and 5 deletions

View File

@ -38,12 +38,21 @@ jobs:
lazbuild -B fpPS4.lpi > nul
strip fpPS4.exe
- name: Download
shell: cmd
working-directory: ./
run: |
curl -k -L -s https://github.com/red-prig/fpps4-bin/raw/main/ffmpeg.zip -o ffmpeg.zip
unzip ffmpeg.zip
- name: Upload artifacts
uses: actions/upload-artifact@v2
if: ${{ !startsWith(github.ref, 'refs/tags/') }}
with:
name: fpPS4
path: fpPS4.exe
path: |
fpPS4.exe
*.dll
if-no-files-found: warn
- name: Pack
@ -53,7 +62,7 @@ jobs:
run: |
mkdir sce_module
echo "Put libSceNgs2.prx and etc. here" > sce_module/info.txt
zip -9 -qq -r "fpPS4_%GITHUB_REF_NAME%.zip" "fpPS4.exe" "sce_module/info.txt"
zip -9 -qq -r "fpPS4_%GITHUB_REF_NAME%.zip" "fpPS4.exe" "*.dll" "sce_module/info.txt"
- name: Release
uses: red-prig/action-gh-release@v1

1
.gitignore vendored
View File

@ -16,4 +16,5 @@ link.res
lib/
backup/
shader_dump/*
avplayer_dump/*
savedata/*

371
ffmpeg/ffmpeg.inc Normal file
View File

@ -0,0 +1,371 @@
{$POINTERMATH ON}
{$MINENUMSIZE 4} (* use 4-byte enums *)
{$WRITEABLECONST ON}
(*
* FF_API_* defines may be placed below to indicate public API that will be
* dropped at a future version bump. The defines themselves are not part of
* the public API and may change, break or disappear at any time.
*
* @note, when bumping the major version it is recommended to manually
* disable each FF_API_* in its own commit instead of disabling them all
* at once through the bump. This improves the git bisect-ability of the change.
*)
const
{$REGION 'libavutil'}
// avutil version constants and FF_API_* deprecation switches for this binding.
LIBAVUTIL_VERSION_MAJOR = 56;
LIBAVUTIL_VERSION_MAJOR_STR = '56';
LIBAVUTIL_VERSION_MINOR = 31;
LIBAVUTIL_VERSION_MICRO = 100;
FFMPEG_VERSION = '4.2.2';
// Packed version: major in bits 16+, minor in bits 8-15, micro in bits 0-7.
LIBAVUTIL_VERSION_INT = ((LIBAVUTIL_VERSION_MAJOR shl 16) or (LIBAVUTIL_VERSION_MINOR shl 8) or LIBAVUTIL_VERSION_MICRO);
{$IFNDEF FF_API_VAAPI}
{$IF LIBAVUTIL_VERSION_MAJOR < 57}
{$DEFINE FF_API_VAAPI}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_FRAME_QP}
{$IF LIBAVUTIL_VERSION_MAJOR < 57}
{$DEFINE FF_API_FRAME_QP}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_PLUS1_MINUS1}
{$IF LIBAVUTIL_VERSION_MAJOR < 57}
{$DEFINE FF_API_PLUS1_MINUS1}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_ERROR_FRAME}
{$IF LIBAVUTIL_VERSION_MAJOR < 57}
{$DEFINE FF_API_ERROR_FRAME}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_PKT_PTS}
{$IF LIBAVUTIL_VERSION_MAJOR < 57}
{$DEFINE FF_API_PKT_PTS}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_CRYPTO_SIZE_T}
{$IF LIBAVUTIL_VERSION_MAJOR < 57}
{$DEFINE FF_API_CRYPTO_SIZE_T}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_FRAME_GET_SET}
{$IF LIBAVUTIL_VERSION_MAJOR < 57}
{$DEFINE FF_API_FRAME_GET_SET}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_PSEUDOPAL}
{$IF LIBAVUTIL_VERSION_MAJOR < 57}
// "Absent" — translated from the original Russian note; presumably marks an
// API that is not present/used in this binding — TODO confirm.
{$DEFINE FF_API_PSEUDOPAL} // Absent
{$ENDIF}
{$ENDIF}
{$ENDREGION}
{$REGION 'libswscale'}
// swscale version constants (DLL name is built from the _STR constant below).
LIBSWSCALE_VERSION_MAJOR = 5;
LIBSWSCALE_VERSION_MAJOR_STR = '5';
LIBSWSCALE_VERSION_MINOR = 5;
LIBSWSCALE_VERSION_MICRO = 100;
{$ENDREGION}
{$REGION 'libavcodec'}
// avcodec version constants and FF_API_* deprecation switches.
LIBAVCODEC_VERSION_MAJOR = 58;
LIBAVCODEC_VERSION_MAJOR_STR = '58';
LIBAVCODEC_VERSION_MINOR = 54;
LIBAVCODEC_VERSION_MICRO = 100;
{$IFNDEF FF_API_LOWRES}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_LOWRES}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_DEBUG_MV}
// Never defined here: 58 < 58 is false with LIBAVCODEC_VERSION_MAJOR = 58.
{$IF LIBAVCODEC_VERSION_MAJOR < 58}
{$DEFINE FF_API_DEBUG_MV}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_AVCTX_TIMEBASE}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
// "Absent" — translated from the original Russian note; presumably marks an
// API that is not present/used in this binding — TODO confirm.
{$DEFINE FF_API_AVCTX_TIMEBASE} // Absent
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_CODED_FRAME}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_CODED_FRAME}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_SIDEDATA_ONLY_PKT}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_SIDEDATA_ONLY_PKT}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_VDPAU_PROFILE}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_VDPAU_PROFILE} // Absent
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_CONVERGENCE_DURATION}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_CONVERGENCE_DURATION}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_AVPICTURE}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_AVPICTURE}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_AVPACKET_OLD_API}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_AVPACKET_OLD_API}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_RTP_CALLBACK}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_RTP_CALLBACK}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_VBV_DELAY}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_VBV_DELAY}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_CODER_TYPE}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_CODER_TYPE}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_STAT_BITS}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_STAT_BITS}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_PRIVATE_OPT}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_PRIVATE_OPT}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_ASS_TIMING}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_ASS_TIMING}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_OLD_BSF}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_OLD_BSF}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_COPY_CONTEXT}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_COPY_CONTEXT}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_GET_CONTEXT_DEFAULTS}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_GET_CONTEXT_DEFAULTS}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_NVENC_OLD_NAME}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_NVENC_OLD_NAME} // Absent
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_STRUCT_VAAPI_CONTEXT}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_STRUCT_VAAPI_CONTEXT}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_MERGE_SD_API}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_MERGE_SD_API}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_TAG_STRING}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_TAG_STRING}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_GETCHROMA}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_GETCHROMA}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_CODEC_GET_SET}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_CODEC_GET_SET}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_USER_VISIBLE_AVHWACCEL}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_USER_VISIBLE_AVHWACCEL}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_LOCKMGR}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_LOCKMGR}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_NEXT}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_NEXT}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_UNSANITIZED_BITRATES}
{$IF LIBAVCODEC_VERSION_MAJOR < 59}
{$DEFINE FF_API_UNSANITIZED_BITRATES}
{$ENDIF}
{$ENDIF}
{$ENDREGION}
{$REGION 'avdevice'}
// avdevice version constants.
LIBAVDEVICE_VERSION_MAJOR = 58;
LIBAVDEVICE_VERSION_MAJOR_STR = '58';
LIBAVDEVICE_VERSION_MINOR = 8;
LIBAVDEVICE_VERSION_MICRO = 100;
{$ENDREGION}
{$REGION 'avformat'}
// avformat version constants and FF_API_* deprecation switches.
LIBAVFORMAT_VERSION_MAJOR = 58;
LIBAVFORMAT_VERSION_MAJOR_STR = '58';
LIBAVFORMAT_VERSION_MINOR = 29;
LIBAVFORMAT_VERSION_MICRO = 100;
{$IFNDEF FF_API_COMPUTE_PKT_FIELDS2}
{$IF LIBAVFORMAT_VERSION_MAJOR < 59}
// "Absent" — translated from the original Russian note; presumably marks an
// API that is not present/used in this binding — TODO confirm.
{$DEFINE FF_API_COMPUTE_PKT_FIELDS2} // Absent
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_OLD_OPEN_CALLBACKS}
{$IF LIBAVFORMAT_VERSION_MAJOR < 59}
{$DEFINE FF_API_OLD_OPEN_CALLBACKS}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_LAVF_AVCTX}
{$IF LIBAVFORMAT_VERSION_MAJOR < 59}
{$DEFINE FF_API_LAVF_AVCTX}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_HTTP_USER_AGENT}
{$IF LIBAVFORMAT_VERSION_MAJOR < 59}
{$DEFINE FF_API_HTTP_USER_AGENT} // Absent
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_HLS_WRAP}
{$IF LIBAVFORMAT_VERSION_MAJOR < 59}
{$DEFINE FF_API_HLS_WRAP} // Absent
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_HLS_USE_LOCALTIME}
{$IF LIBAVFORMAT_VERSION_MAJOR < 59}
{$DEFINE FF_API_HLS_USE_LOCALTIME} // Absent
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_LAVF_KEEPSIDE_FLAG}
{$IF LIBAVFORMAT_VERSION_MAJOR < 59}
{$DEFINE FF_API_LAVF_KEEPSIDE_FLAG} // Absent
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_OLD_ROTATE_API}
{$IF LIBAVFORMAT_VERSION_MAJOR < 59}
{$DEFINE FF_API_OLD_ROTATE_API} // Absent
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_FORMAT_GET_SET}
{$IF LIBAVFORMAT_VERSION_MAJOR < 59}
{$DEFINE FF_API_FORMAT_GET_SET}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_OLD_AVIO_EOF_0}
{$IF LIBAVFORMAT_VERSION_MAJOR < 59}
{$DEFINE FF_API_OLD_AVIO_EOF_0} // Absent
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_LAVF_FFSERVER}
{$IF LIBAVFORMAT_VERSION_MAJOR < 59}
{$DEFINE FF_API_LAVF_FFSERVER}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_FORMAT_FILENAME}
{$IF LIBAVFORMAT_VERSION_MAJOR < 59}
{$DEFINE FF_API_FORMAT_FILENAME}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_OLD_RTSP_OPTIONS}
{$IF LIBAVFORMAT_VERSION_MAJOR < 59}
{$DEFINE FF_API_OLD_RTSP_OPTIONS} // Absent
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_NEXT}
// NOTE: FF_API_NEXT is already defined in the libavcodec region above
// (58 < 59), so this IFNDEF block is skipped.
{$IF LIBAVFORMAT_VERSION_MAJOR < 59}
{$DEFINE FF_API_NEXT}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_DASH_MIN_SEG_DURATION}
{$IF LIBAVFORMAT_VERSION_MAJOR < 59}
{$DEFINE FF_API_DASH_MIN_SEG_DURATION} // Absent
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_LAVF_MP4A_LATM}
{$IF LIBAVFORMAT_VERSION_MAJOR < 59}
{$DEFINE FF_API_LAVF_MP4A_LATM} // Absent
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_AVIOFORMAT}
{$IF LIBAVFORMAT_VERSION_MAJOR < 59}
{$DEFINE FF_API_AVIOFORMAT}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_R_FRAME_RATE}
FF_API_R_FRAME_RATE = 1;
{$ENDIF}
{$ENDREGION}
{$REGION 'postproc'}
// postproc version constants.
LIBPOSTPROC_VERSION_MAJOR = 55;
LIBPOSTPROC_VERSION_MAJOR_STR = '55';
LIBPOSTPROC_VERSION_MINOR = 5;
LIBPOSTPROC_VERSION_MICRO = 100;
{$ENDREGION}
{$REGION 'swresample.h'}
// swresample version constants.
LIBSWRESAMPLE_VERSION_MAJOR = 3;
LIBSWRESAMPLE_VERSION_MAJOR_STR = '3';
LIBSWRESAMPLE_VERSION_MINOR = 5;
LIBSWRESAMPLE_VERSION_MICRO = 100;
// NOTE(review): this switch tests LIBSWSCALE_VERSION_MAJOR and logically
// belongs to the 'libswscale' region, not swresample.
{$IFNDEF FF_API_SWS_VECTOR}
{$IF LIBSWSCALE_VERSION_MAJOR < 6}
{$DEFINE FF_API_SWS_VECTOR}
{$ENDIF}
{$ENDIF}
{$ENDREGION}
{$REGION 'avfilter.h'}
// avfilter version constants and FF_API_* deprecation switches.
LIBAVFILTER_VERSION_MAJOR = 7;
LIBAVFILTER_VERSION_MAJOR_STR = '7';
LIBAVFILTER_VERSION_MINOR = 57;
LIBAVFILTER_VERSION_MICRO = 100;
{$IFNDEF FF_API_OLD_FILTER_OPTS_ERROR}
{$IF LIBAVFILTER_VERSION_MAJOR < 8}
{$DEFINE FF_API_OLD_FILTER_OPTS_ERROR} // Absent
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_LAVR_OPTS}
{$IF LIBAVFILTER_VERSION_MAJOR < 8}
{$DEFINE FF_API_LAVR_OPTS}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_FILTER_GET_SET}
{$IF LIBAVFILTER_VERSION_MAJOR < 8}
{$DEFINE FF_API_FILTER_GET_SET}
{$ENDIF}
{$ENDIF}
{$IFNDEF FF_API_NEXT}
// NOTE: FF_API_NEXT is already defined in the libavcodec region above,
// so this IFNDEF block is skipped.
{$IF LIBAVFILTER_VERSION_MAJOR < 8}
{$DEFINE FF_API_NEXT}
{$ENDIF}
{$ENDIF}
{$DEFINE FF_INTERNAL_FIELDS}
{$ENDREGION}
// Windows DLL names built from the per-library major version strings above.
swscale_dll = 'swscale-' + LIBSWSCALE_VERSION_MAJOR_STR + '.dll';
avutil_dll = 'avutil-' + LIBAVUTIL_VERSION_MAJOR_STR + '.dll';
avcodec_dll = 'avcodec-' + LIBAVCODEC_VERSION_MAJOR_STR + '.dll';
avdevice_dll = 'avdevice-' + LIBAVDEVICE_VERSION_MAJOR_STR + '.dll';
avformat_dll = 'avformat-' + LIBAVFORMAT_VERSION_MAJOR_STR + '.dll';
postproc_dll = 'postproc-' + LIBPOSTPROC_VERSION_MAJOR_STR + '.dll';
swresample_dll = 'swresample-' + LIBSWRESAMPLE_VERSION_MAJOR_STR + '.dll';
avfilter_dll = 'avfilter-' + LIBAVFILTER_VERSION_MAJOR_STR + '.dll';

99
ffmpeg/ffmpeg_types.pas Normal file
View File

@ -0,0 +1,99 @@
// C-type aliases shared by all ffmpeg binding units in this project.
// The aliases map C fixed-width and platform types onto Pascal types so the
// extern declarations read close to the original C headers.
unit ffmpeg_types;
{$IFDEF FPC}
{$MODE Delphi}
{$ENDIF}
interface
Type
// NOTE(review): C99 'bool' is 1 byte; WordBool is 2 bytes — verify this is
// only used where the C ABI actually passes a 2/4-byte boolean.
Bool = WordBool;
float = Single;
ppDouble = ^pDouble;
size_t = NativeUInt;
psize_t = ^size_t;
// NOTE(review): C ptrdiff_t is SIGNED and pointer-sized (NativeInt, 8 bytes
// on Win64); UInt32 is 4 bytes and unsigned — verify against struct layouts
// (e.g. array4_ptrdiff_t below) before relying on this.
ptrdiff_t = UInt32;
uint32_t = Cardinal;
unsigned = uint32_t;
unsignedint = UInt32;
UINT = unsigned;
unsigned_int = UInt32;
punsigned_int = ^unsigned_int;
unsigned_long = Cardinal; // matches 32-bit 'unsigned long' on Windows (LLP64)
unsignedchar = Byte;
unsigned_char = unsignedchar;
punsignedchar = PByte; // ^unsignedchar;
punsigned_char = punsignedchar;
Int = Integer;
pint = ^Int;
ppint = ^pint;
int8_t = Int8;
pint8_t = ^int8_t;
uint8_t = Byte;
puint8_t = PByte; // ^uint8_t;
ppuint8_t = ^puint8_t;
PPByte = ppuint8_t;
int16_t = int16;
pint16_t = ^int16_t;
uint16_t = UInt16;
puint16_t = ^uint16_t;
int32_t = Int32;
pint32_t = ^int32_t;
ppint32_t = ^pint32_t;
int64_t = Int64;
pint64_t = ^int64_t;
uint64_t = UInt64;
puint64_t = ^uint64_t;
// Open-array-style helpers: [0..0] arrays are indexed past their declared
// bound via a pointer (requires {$POINTERMATH}/unchecked indexing).
array_uint8_t = array [0 .. 0] of uint8_t;
parray_uint8_t = ^array_uint8_t;
array_int = array [0 .. 0] of Int;
parray_int = ^array_int;
array4_int = array [0 .. 3] of Int;
parray4_int = ^array4_int;
array4_puint8_t = array [0 .. 3] of puint8_t;
parray4_puint8_t = ^array4_puint8_t;
array4_ptrdiff_t = array [0 .. 3] of ptrdiff_t;
parray4_ptrdiff_t = ^array4_ptrdiff_t;
// NOTE(review): MSVC time_t has been 64-bit by default since VS2005;
// LongInt is 32-bit in Delphi mode — verify against the DLLs' ABI.
time_t = LongInt;
AnsiCharArray = array [0 .. 0] of pAnsiChar;
pAnsiCharArray = ^AnsiCharArray;
(* MICROSOFT VC++ STDIO'S FILE DEFINITION *)
_iobuf = record
_ptr: pAnsiChar;
_cnt: Integer;
_base: pAnsiChar;
_flag: Integer;
_file: Integer;
_charbuf: Integer;
_bufsiz: Integer;
_tmpfname: pAnsiChar;
end;
PFile = ^TFile;
TFile = _iobuf;
// Opaque handles: the binding never dereferences these.
pAVHWAccel = Pointer;
ppAVCodecHWConfigInternal = Pointer;
const
// NOTE(review): $FFFF is the maximum of an UNSIGNED 16-BIT value; the name
// suggests maximum unsigned (32-bit would be $FFFFFFFF) — verify intent.
max_unsigned = $FFFF;
implementation
end.

6785
ffmpeg/libavcodec.pas Normal file

File diff suppressed because it is too large Load Diff

522
ffmpeg/libavdevice.pas Normal file
View File

@ -0,0 +1,522 @@
// Pascal binding for ffmpeg's libavdevice (special device muxers/demuxers).
// Declaration-only unit: every routine is imported from avdevice_dll
// (name composed in ffmpeg.inc). Original C prototypes are kept as comments.
unit libavdevice;
{$IFDEF FPC}
{$MODE Delphi}
{$ENDIF}
interface
Uses
ffmpeg_types, libavutil, libavcodec, libavformat;
{$I ffmpeg.inc}
(* *
* @defgroup lavd libavdevice
* Special devices muxing/demuxing library.
*
* Libavdevice is a complementary library to @ref libavf "libavformat". It
* provides various "special" platform-specific muxers and demuxers, e.g. for
* grabbing devices, audio capture and playback etc. As a consequence, the
* (de)muxers in libavdevice are of the AVFMT_NOFILE type (they use their own
* I/O functions). The filename passed to avformat_open_input() often does not
* refer to an actually existing file, but has some special device-specific
* meaning - e.g. for xcbgrab it is the display name.
*
* To use libavdevice, simply call avdevice_register_all() to register all
* compiled muxers and demuxers. They all use standard libavformat API.
*
*)
(* *
* Return the LIBAVDEVICE_VERSION_INT constant.
*)
// unsigned avdevice_version(void);
function avdevice_version(): unsigned; cdecl; external avdevice_dll;
(* *
* Return the libavdevice build-time configuration.
*)
// const char *avdevice_configuration(void);
function avdevice_configuration(): pAnsiChar; cdecl; external avdevice_dll;
(* *
* Return the libavdevice license.
*)
// const char *avdevice_license(void);
function avdevice_license(): pAnsiChar; cdecl; external avdevice_dll;
(* *
* Initialize libavdevice and register all the input and output devices.
*)
// void avdevice_register_all(void);
procedure avdevice_register_all(); cdecl; external avdevice_dll;
(* *
* Audio input devices iterator.
*
* If d is NULL, returns the first registered input audio/video device,
* if d is non-NULL, returns the next registered input audio/video device after d
* or NULL if d is the last one.
*)
// AVInputFormat *av_input_audio_device_next(AVInputFormat *d);
function av_input_audio_device_next(d: pAVInputFormat): pAVInputFormat; cdecl; external avdevice_dll;
(* *
* Video input devices iterator.
*
* If d is NULL, returns the first registered input audio/video device,
* if d is non-NULL, returns the next registered input audio/video device after d
* or NULL if d is the last one.
*)
// AVInputFormat *av_input_video_device_next(AVInputFormat *d);
function av_input_video_device_next(d: pAVInputFormat): pAVInputFormat; cdecl; external avdevice_dll;
(* *
* Audio output devices iterator.
*
* If d is NULL, returns the first registered output audio/video device,
* if d is non-NULL, returns the next registered output audio/video device after d
* or NULL if d is the last one.
*)
// AVOutputFormat *av_output_audio_device_next(AVOutputFormat *d);
function av_output_audio_device_next(d: pAVOutputFormat): pAVOutputFormat; cdecl; external avdevice_dll;
(* *
* Video output devices iterator.
*
* If d is NULL, returns the first registered output audio/video device,
* if d is non-NULL, returns the next registered output audio/video device after d
* or NULL if d is the last one.
*)
// AVOutputFormat *av_output_video_device_next(AVOutputFormat *d);
function av_output_video_device_next(d: pAVOutputFormat): pAVOutputFormat; cdecl; external avdevice_dll;
type
pAVDeviceRect = ^AVDeviceRect;
AVDeviceRect = record
x: int; (* *< x coordinate of top left corner *)
y: int; (* *< y coordinate of top left corner *)
width: int; (* *< width *)
height: int; (* *< height *)
end;
(* *
* Message types used by avdevice_app_to_dev_control_message().
*)
// NOTE(review): declared in C as an enum; modeled here as a 4-byte char
// array. The 4-char constants below presumably match the C enum's FourCC
// values and byte order — verify against avdevice.h.
AVAppToDevMessageType = array [0 .. 3] of AnsiChar;
const
(* *
* Dummy message.
*)
AV_APP_TO_DEV_NONE: AVAppToDevMessageType = ('N', 'O', 'N', 'E');
(* *
* Window size change message.
*
* Message is sent to the device every time the application changes the size
* of the window device renders to.
* Message should also be sent right after window is created.
*
* data: AVDeviceRect: new window size.
*)
AV_APP_TO_DEV_WINDOW_SIZE: AVAppToDevMessageType = ('G', 'E', 'O', 'M');
(* *
* Repaint request message.
*
* Message is sent to the device when window has to be repainted.
*
* data: AVDeviceRect: area required to be repainted.
* NULL: whole area is required to be repainted.
*)
AV_APP_TO_DEV_WINDOW_REPAINT: AVAppToDevMessageType = ('R', 'E', 'P', 'A');
(* *
* Request pause/play.
*
* Application requests pause/unpause playback.
* Mostly usable with devices that have internal buffer.
* By default devices are not paused.
*
* data: NULL
*)
AV_APP_TO_DEV_PAUSE: AVAppToDevMessageType = ('P', 'A', 'U', ' ');
AV_APP_TO_DEV_PLAY: AVAppToDevMessageType = ('P', 'L', 'A', 'Y');
AV_APP_TO_DEV_TOGGLE_PAUSE: AVAppToDevMessageType = ('P', 'A', 'U', 'T');
(* *
* Volume control message.
*
* Set volume level. It may be device-dependent if volume
* is changed per stream or system wide. Per stream volume
* change is expected when possible.
*
* data: double: new volume with range of 0.0 - 1.0.
*)
AV_APP_TO_DEV_SET_VOLUME: AVAppToDevMessageType = ('S', 'V', 'O', 'L');
(* *
* Mute control messages.
*
* Change mute state. It may be device-dependent if mute status
* is changed per stream or system wide. Per stream mute status
* change is expected when possible.
*
* data: NULL.
*)
AV_APP_TO_DEV_MUTE: AVAppToDevMessageType = (' ', 'M', 'U', 'T');
AV_APP_TO_DEV_UNMUTE: AVAppToDevMessageType = ('U', 'M', 'U', 'T');
AV_APP_TO_DEV_TOGGLE_MUTE: AVAppToDevMessageType = ('T', 'M', 'U', 'T');
(* *
* Get volume/mute messages.
*
* Force the device to send AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED or
* AV_DEV_TO_APP_MUTE_STATE_CHANGED command respectively.
*
* data: NULL.
*)
AV_APP_TO_DEV_GET_VOLUME: AVAppToDevMessageType = ('G', 'V', 'O', 'L');
AV_APP_TO_DEV_GET_MUTE: AVAppToDevMessageType = ('G', 'M', 'U', 'T');
type
// NOTE(review): same enum-as-FourCC-char-array modeling as above — verify.
AVDevToAppMessageType = array [0 .. 3] of AnsiChar;
const
(* *
* Message types used by avdevice_dev_to_app_control_message().
*)
(* *
* Dummy message.
*)
AV_DEV_TO_APP_NONE: AVDevToAppMessageType = ('N', 'O', 'N', 'E');
(* *
* Create window buffer message.
*
* Device requests to create a window buffer. Exact meaning is device-
* and application-dependent. Message is sent before rendering first
* frame and all one-shot initializations should be done here.
* Application is allowed to ignore preferred window buffer size.
*
* @note: Application is obligated to inform about window buffer size
* with AV_APP_TO_DEV_WINDOW_SIZE message.
*
* data: AVDeviceRect: preferred size of the window buffer.
* NULL: no preferred size of the window buffer.
*)
AV_DEV_TO_APP_CREATE_WINDOW_BUFFER: AVDevToAppMessageType = ('B', 'C', 'R', 'E');
(* *
* Prepare window buffer message.
*
* Device requests to prepare a window buffer for rendering.
* Exact meaning is device- and application-dependent.
* Message is sent before rendering of each frame.
*
* data: NULL.
*)
AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER: AVDevToAppMessageType = ('B', 'P', 'R', 'E');
(* *
* Display window buffer message.
*
* Device requests to display a window buffer.
* Message is sent when new frame is ready to be displayed.
* Usually buffers need to be swapped in handler of this message.
*
* data: NULL.
*)
AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER: AVDevToAppMessageType = ('B', 'D', 'I', 'S');
(* *
* Destroy window buffer message.
*
* Device requests to destroy a window buffer.
* Message is sent when device is about to be destroyed and window
* buffer is not required anymore.
*
* data: NULL.
*)
AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER: AVDevToAppMessageType = ('B', 'D', 'E', 'S');
(* *
* Buffer fullness status messages.
*
* Device signals buffer overflow/underflow.
*
* data: NULL.
*)
AV_DEV_TO_APP_BUFFER_OVERFLOW: AVDevToAppMessageType = ('B', 'O', 'F', 'L');
AV_DEV_TO_APP_BUFFER_UNDERFLOW: AVDevToAppMessageType = ('B', 'U', 'F', 'L');
(* *
* Buffer readable/writable.
*
* Device informs that buffer is readable/writable.
* When possible, device informs how many bytes can be read/write.
*
* @warning Device may not inform when the number of bytes that can be read/written changes.
*
* data: int64_t: amount of bytes available to read/write.
* NULL: amount of bytes available to read/write is not known.
*)
AV_DEV_TO_APP_BUFFER_READABLE: AVDevToAppMessageType = ('B', 'R', 'D', ' ');
AV_DEV_TO_APP_BUFFER_WRITABLE: AVDevToAppMessageType = ('B', 'W', 'R', ' ');
(* *
* Mute state change message.
*
* Device informs that mute state has changed.
*
* data: int: 0 for not muted state, non-zero for muted state.
*)
AV_DEV_TO_APP_MUTE_STATE_CHANGED: AVDevToAppMessageType = ('C', 'M', 'U', 'T');
(* *
* Volume level change message.
*
* Device informs that volume level has changed.
*
* data: double: new volume with range of 0.0 - 1.0.
*)
AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED: AVDevToAppMessageType = ('C', 'V', 'O', 'L');
(* *
* Send control message from application to device.
*
* @param s device context.
* @param type message type.
* @param data message data. Exact type depends on message type.
* @param data_size size of message data.
* @return >= 0 on success, negative on error.
* AVERROR(ENOSYS) when device doesn't implement handler of the message.
*)
// int avdevice_app_to_dev_control_message(struct AVFormatContext *s,
// enum AVAppToDevMessageType type,
// void *data, size_t data_size);
// NOTE(review): _type is a 4-byte array passed by value where C passes a
// 4-byte enum — presumably ABI-compatible under cdecl; verify.
function avdevice_app_to_dev_control_message(s: pAVFormatContext; _type: AVAppToDevMessageType; data: Pointer; data_size: size_t): int;
cdecl; external avdevice_dll;
(* *
* Send control message from device to application.
*
* @param s device context.
* @param type message type.
* @param data message data. Can be NULL.
* @param data_size size of message data.
* @return >= 0 on success, negative on error.
* AVERROR(ENOSYS) when application doesn't implement handler of the message.
*)
// int avdevice_dev_to_app_control_message(struct AVFormatContext *s,
// enum AVDevToAppMessageType type,
// void *data, size_t data_size);
function avdevice_dev_to_app_control_message(s: pAVFormatContext; _type: AVDevToAppMessageType; data: Pointer; data_size: size_t): int;
cdecl; external avdevice_dll;
(* *
* Following API allows user to probe device capabilities (supported codecs,
* pixel formats, sample formats, resolutions, channel counts, etc).
* It is built on top of the AVOption API.
* Queried capabilities make it possible to set up converters of video or audio
* parameters that fit to the device.
*
* List of capabilities that can be queried:
* - Capabilities valid for both audio and video devices:
* - codec: supported audio/video codecs.
* type: AV_OPT_TYPE_INT (AVCodecID value)
* - Capabilities valid for audio devices:
* - sample_format: supported sample formats.
* type: AV_OPT_TYPE_INT (AVSampleFormat value)
* - sample_rate: supported sample rates.
* type: AV_OPT_TYPE_INT
* - channels: supported number of channels.
* type: AV_OPT_TYPE_INT
* - channel_layout: supported channel layouts.
* type: AV_OPT_TYPE_INT64
* - Capabilities valid for video devices:
* - pixel_format: supported pixel formats.
* type: AV_OPT_TYPE_INT (AVPixelFormat value)
* - window_size: supported window sizes (describes size of the window size presented to the user).
* type: AV_OPT_TYPE_IMAGE_SIZE
* - frame_size: supported frame sizes (describes size of provided video frames).
* type: AV_OPT_TYPE_IMAGE_SIZE
* - fps: supported fps values
* type: AV_OPT_TYPE_RATIONAL
*
* Value of the capability may be set by user using av_opt_set() function
* and AVDeviceCapabilitiesQuery object. Following queries will
* limit results to the values matching already set capabilities.
* For example, setting a codec may impact number of formats or fps values
* returned during next query. Setting invalid value may limit results to zero.
*
* Example of usage based on the opengl output device:
*
* @code
* AVFormatContext *oc = NULL;
* AVDeviceCapabilitiesQuery *caps = NULL;
* AVOptionRanges *ranges;
* int ret;
*
* if ((ret = avformat_alloc_output_context2(&oc, NULL, "opengl", NULL)) < 0)
* goto fail;
* if (avdevice_capabilities_create(&caps, oc, NULL) < 0)
* goto fail;
*
* //query codecs
* if (av_opt_query_ranges(&ranges, caps, "codec", AV_OPT_MULTI_COMPONENT_RANGE)) < 0)
* goto fail;
* //pick codec here and set it
* av_opt_set(caps, "codec", AV_CODEC_ID_RAWVIDEO, 0);
*
* //query format
* if (av_opt_query_ranges(&ranges, caps, "pixel_format", AV_OPT_MULTI_COMPONENT_RANGE)) < 0)
* goto fail;
* //pick format here and set it
* av_opt_set(caps, "pixel_format", AV_PIX_FMT_YUV420P, 0);
*
* //query and set more capabilities
*
* fail:
* //clean up code
* avdevice_capabilities_free(&query, oc);
* avformat_free_context(oc);
* @endcode
*)
type
(* *
* Structure describes device capabilities.
*
* It is used by devices in conjunction with av_device_capabilities AVOption table
* to implement capabilities probing API based on AVOption API. Should not be used directly.
*)
pAVDeviceCapabilitiesQuery = ^AVDeviceCapabilitiesQuery;
AVDeviceCapabilitiesQuery = record
av_class: pAVClass;
device_context: pAVFormatContext;
codec: AVCodecID;
sample_format: AVSampleFormat;
pixel_format: AVPixelFormat;
sample_rate: int;
channels: int;
channel_layout: int64_t;
window_width: int;
window_height: int;
frame_width: int;
frame_height: int;
fps: AVRational;
end;
(* *
* AVOption table used by devices to implement device capabilities API. Should not be used by a user.
*)
// extern const AVOption av_device_capabilities[];
(* *
* Initialize capabilities probing API based on AVOption API.
*
* avdevice_capabilities_free() must be called when query capabilities API is
* not used anymore.
*
* @param[out] caps Device capabilities data. Pointer to a NULL pointer must be passed.
* @param s Context of the device.
* @param device_options An AVDictionary filled with device-private options.
* On return this parameter will be destroyed and replaced with a dict
* containing options that were not found. May be NULL.
* The same options must be passed later to avformat_write_header() for output
* devices or avformat_open_input() for input devices, or at any other place
* that affects device-private options.
*
* @return >= 0 on success, negative otherwise.
*)
// int avdevice_capabilities_create(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s,
// AVDictionary **device_options);
function avdevice_capabilities_create(var caps: pAVDeviceCapabilitiesQuery; s: pAVFormatContext; var device_options: pAVDictionary): int;
cdecl; external avdevice_dll;
(* *
* Free resources created by avdevice_capabilities_create()
*
* @param caps Device capabilities data to be freed.
* @param s Context of the device.
*)
// void avdevice_capabilities_free(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s);
procedure avdevice_capabilities_free(var caps: pAVDeviceCapabilitiesQuery; s: pAVFormatContext); cdecl; external avdevice_dll;
type
(* *
* Structure describes basic parameters of the device.
*)
pAVDeviceInfo = ^AVDeviceInfo;
ppAVDeviceInfo = ^pAVDeviceInfo;
AVDeviceInfo = record
device_name: pAnsiChar; (* *< device name, format depends on device *)
device_description: pAnsiChar; (* *< human friendly name *)
end;
(* *
* List of devices.
*)
pAVDeviceInfoList = ^AVDeviceInfoList;
AVDeviceInfoList = record
devices: ppAVDeviceInfo; (* *< list of autodetected devices *)
nb_devices: int; (* *< number of autodetected devices *)
default_device: int; (* *< index of default device or -1 if no default *)
end;
(* *
* List devices.
*
* Returns available device names and their parameters.
*
* @note: Some devices may accept system-dependent device names that cannot be
* autodetected. The list returned by this function cannot be assumed to
* always be complete.
*
* @param s device context.
* @param[out] device_list list of autodetected devices.
* @return count of autodetected devices, negative on error.
*)
// int avdevice_list_devices(struct AVFormatContext *s, AVDeviceInfoList **device_list);
function avdevice_list_devices(s: pAVFormatContext; var device_list: pAVDeviceInfoList): int; cdecl; external avdevice_dll;
(* *
* Convenient function to free result of avdevice_list_devices().
*
* @param devices device list to be freed.
*)
// void avdevice_free_list_devices(AVDeviceInfoList **device_list);
procedure avdevice_free_list_devices(var device_list: pAVDeviceInfoList); cdecl; external avdevice_dll;
(* *
* List devices.
*
* Returns available device names and their parameters.
* These are convenient wrappers for avdevice_list_devices().
* Device context is allocated and deallocated internally.
*
* @param device device format. May be NULL if device name is set.
* @param device_name device name. May be NULL if device format is set.
* @param device_options An AVDictionary filled with device-private options. May be NULL.
* The same options must be passed later to avformat_write_header() for output
* devices or avformat_open_input() for input devices, or at any other place
* that affects device-private options.
* @param[out] device_list list of autodetected devices
* @return count of autodetected devices, negative on error.
* @note device argument takes precedence over device_name when both are set.
*)
// int avdevice_list_input_sources(struct AVInputFormat *device, const char *device_name,
// AVDictionary *device_options, AVDeviceInfoList **device_list);
function avdevice_list_input_sources(device: pAVInputFormat; const device_name: pAnsiChar; device_options: pAVDictionary;
var device_list: pAVDeviceInfoList): int; cdecl; external avdevice_dll;
// int avdevice_list_output_sinks(struct AVOutputFormat *device, const char *device_name,
// AVDictionary *device_options, AVDeviceInfoList **device_list);
function avdevice_list_output_sinks(device: pAVOutputFormat; const device_name: pAnsiChar; device_options: pAVDictionary;
var device_list: pAVDeviceInfoList): int; cdecl; external avdevice_dll;
implementation
end.

1482
ffmpeg/libavfilter.pas Normal file

File diff suppressed because it is too large Load Diff

4218
ffmpeg/libavformat.pas Normal file

File diff suppressed because it is too large Load Diff

9586
ffmpeg/libavutil.pas Normal file

File diff suppressed because it is too large Load Diff

108
ffmpeg/libpostproc.pas Normal file
View File

@ -0,0 +1,108 @@
unit libpostproc;
{ Pascal translation of FFmpeg's libpostproc public header (postprocess.h).
  Interface-only unit: every routine is imported at load time from the
  external postproc DLL named via ffmpeg.inc, and the original C prototype
  is preserved as a comment directly above each declaration. }
{$IFDEF FPC}
{$MODE Delphi}
{$ENDIF}
interface
Uses
ffmpeg_types;
{$I ffmpeg.inc}
(* *
* Return the LIBPOSTPROC_VERSION_INT constant.
*)
// unsigned postproc_version(void);
function postproc_version(): unsigned; cdecl; external postproc_dll;
(* *
* Return the libpostproc build-time configuration.
*)
// const char *postproc_configuration(void);
function postproc_configuration(): pAnsiChar; cdecl; external postproc_dll;
(* *
* Return the libpostproc license.
*)
// const char *postproc_license(void);
function postproc_license(): pAnsiChar; cdecl; external postproc_dll;
const
PP_QUALITY_MAX = 6;
// #include <inttypes.h>
type
// Opaque handles: libpostproc keeps the layout of these records private,
// so they are declared empty here and must only be used through pointers
// returned by pp_get_context()/pp_get_mode_by_name_and_quality().
ppp_context = ^pp_context;
pp_context = record
end;
ppp_mode = ^pp_mode;
pp_mode = record
end;
// Fixed-size pointer/stride arrays mirroring the C 'uint8_t *[3]' and
// 'int [3]' parameters of pp_postprocess() (presumably one entry per
// image plane - TODO confirm against postprocess.h usage).
Tpp_src_puint8_t = array [0 .. 2] of puint8_t;
Tpp_dst_puint8_t = Tpp_src_puint8_t;
Tpp_srcStride_int = array [0 .. 2] of int;
Tpp_dstStride_int = Tpp_srcStride_int;
(*
#if LIBPOSTPROC_VERSION_INT < (52<<16)
typedef pp_context pp_context_t;
typedef pp_mode pp_mode_t;
extern const char *const pp_help; ///< a simple help text
#else
extern const char pp_help[]; ///< a simple help text
#endif
*)
// void pp_postprocess(const uint8_t * src[3], const int srcStride[3],
// uint8_t * dst[3], const int dstStride[3],
// int horizontalSize, int verticalSize,
// const int8_t *QP_store, int QP_stride,
// pp_mode *mode, pp_context *ppContext, int pict_type);
procedure pp_postprocess(const src: Tpp_src_puint8_t; const srcStride: Tpp_srcStride_int; dst: Tpp_dst_puint8_t;
const dstStride: Tpp_dstStride_int; horizontalSize: int; verticalSize: int; const QP_store: pint8_t; QP_stride: int; mode: ppp_mode;
ppContext: ppp_context; pict_type: int); cdecl; external postproc_dll;
(* *
* Return a pp_mode or NULL if an error occurred.
*
* @param name the string after "-pp" on the command line
* @param quality a number from 0 to PP_QUALITY_MAX
*)
// pp_mode *pp_get_mode_by_name_and_quality(const char *name, int quality);
function pp_get_mode_by_name_and_quality(const name: pAnsiChar; quality: int): ppp_mode; cdecl; external postproc_dll;
// void pp_free_mode(pp_mode *mode);
procedure pp_free_mode(mode: ppp_mode); cdecl; external postproc_dll;
// pp_context *pp_get_context(int width, int height, int flags);
function pp_get_context(width: int; height: int; flags: int): ppp_context; cdecl; external postproc_dll;
// void pp_free_context(pp_context *ppContext);
procedure pp_free_context(ppContext: ppp_context); cdecl; external postproc_dll;
const
// CPU-capability and picture-format bits; NOTE(review): per postprocess.h
// these appear to be OR-ed together into pp_get_context()'s 'flags'
// argument - confirm against the C header before relying on this.
PP_CPU_CAPS_MMX = $80000000;
PP_CPU_CAPS_MMX2 = $20000000;
PP_CPU_CAPS_3DNOW = $40000000;
PP_CPU_CAPS_ALTIVEC = $10000000;
PP_CPU_CAPS_AUTO = $00080000;
PP_FORMAT = $00000008;
PP_FORMAT_420 = ($00000011 or PP_FORMAT);
PP_FORMAT_422 = ($00000001 or PP_FORMAT);
PP_FORMAT_411 = ($00000002 or PP_FORMAT);
PP_FORMAT_444 = ($00000000 or PP_FORMAT);
PP_FORMAT_440 = ($00000010 or PP_FORMAT);
PP_PICT_TYPE_QP2 = $00000010;
/// < MPEG2 style QScale
implementation
end.

564
ffmpeg/libswresample.pas Normal file
View File

@ -0,0 +1,564 @@
unit libswresample;
{ Pascal translation of FFmpeg's libswresample public header (swresample.h).
  Interface-only unit: every routine is imported at load time from the
  external swresample DLL named via ffmpeg.inc, and the original C
  prototype is preserved as a comment directly above each declaration. }
{$IFDEF FPC}
{$MODE Delphi}
{$ENDIF}
interface
Uses
ffmpeg_types, libavutil;
{$I ffmpeg.inc}
(* *
* @defgroup lswr libswresample
* @{
*
* Audio resampling, sample format conversion and mixing library.
*
* Interaction with lswr is done through SwrContext, which is
* allocated with swr_alloc() or swr_alloc_set_opts(). It is opaque, so all parameters
* must be set with the @ref avoptions API.
*
* The first thing you will need to do in order to use lswr is to allocate
* SwrContext. This can be done with swr_alloc() or swr_alloc_set_opts(). If you
* are using the former, you must set options through the @ref avoptions API.
* The latter function provides the same feature, but it allows you to set some
* common options in the same statement.
*
* For example the following code will setup conversion from planar float sample
* format to interleaved signed 16-bit integer, downsampling from 48kHz to
* 44.1kHz and downmixing from 5.1 channels to stereo (using the default mixing
* matrix). This is using the swr_alloc() function.
* @code
* SwrContext *swr = swr_alloc();
* av_opt_set_channel_layout(swr, "in_channel_layout", AV_CH_LAYOUT_5POINT1, 0);
* av_opt_set_channel_layout(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
* av_opt_set_int(swr, "in_sample_rate", 48000, 0);
* av_opt_set_int(swr, "out_sample_rate", 44100, 0);
* av_opt_set_sample_fmt(swr, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0);
* av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
* @endcode
*
* The same job can be done using swr_alloc_set_opts() as well:
* @code
* SwrContext *swr = swr_alloc_set_opts(NULL, // we're allocating a new context
* AV_CH_LAYOUT_STEREO, // out_ch_layout
* AV_SAMPLE_FMT_S16, // out_sample_fmt
* 44100, // out_sample_rate
* AV_CH_LAYOUT_5POINT1, // in_ch_layout
* AV_SAMPLE_FMT_FLTP, // in_sample_fmt
* 48000, // in_sample_rate
* 0, // log_offset
* NULL); // log_ctx
* @endcode
*
* Once all values have been set, it must be initialized with swr_init(). If
* you need to change the conversion parameters, you can change the parameters
* using @ref AVOptions, as described above in the first example; or by using
* swr_alloc_set_opts(), but with the first argument the allocated context.
* You must then call swr_init() again.
*
* The conversion itself is done by repeatedly calling swr_convert().
* Note that the samples may get buffered in swr if you provide insufficient
* output space or if sample rate conversion is done, which requires "future"
* samples. Samples that do not require future input can be retrieved at any
* time by using swr_convert() (in_count can be set to 0).
* At the end of conversion the resampling buffer can be flushed by calling
* swr_convert() with NULL in and 0 in_count.
*
* The samples used in the conversion process can be managed with the libavutil
* @ref lavu_sampmanip "samples manipulation" API, including av_samples_alloc()
* function used in the following example.
*
* The delay between input and output, can at any time be found by using
* swr_get_delay().
*
* The following code demonstrates the conversion loop assuming the parameters
* from above and caller-defined functions get_input() and handle_output():
* @code
* uint8_t **input;
* int in_samples;
*
* while (get_input(&input, &in_samples)) {
* uint8_t *output;
* int out_samples = av_rescale_rnd(swr_get_delay(swr, 48000) +
* in_samples, 44100, 48000, AV_ROUND_UP);
* av_samples_alloc(&output, NULL, 2, out_samples,
* AV_SAMPLE_FMT_S16, 0);
* out_samples = swr_convert(swr, &output, out_samples,
* input, in_samples);
* handle_output(output, out_samples);
* av_freep(&output);
* }
* @endcode
*
* When the conversion is finished, the conversion
* context and everything associated with it must be freed with swr_free().
* A swr_close() function is also available, but it exists mainly for
* compatibility with libavresample, and is not required to be called.
*
* There will be no memory leak if the data is not completely flushed before
* swr_free().
*)
// #include <stdint.h>
// #include "libavutil/channel_layout.h"
// #include "libavutil/frame.h"
// #include "libavutil/samplefmt.h"
// #include "libswresample/version.h"
const
(* *
* @name Option constants
* These constants are used for the @ref avoptions interface for lswr.
* @{
*
*)
SWR_FLAG_RESAMPLE = 1;
/// < Force resampling even if equal sample rate
// TODO use int resample ?
// long term TODO can we enable this dynamically?
type
(* * Dithering algorithms *)
SwrDitherType = ( //
SWR_DITHER_NONE = 0, SWR_DITHER_RECTANGULAR, SWR_DITHER_TRIANGULAR, SWR_DITHER_TRIANGULAR_HIGHPASS,
SWR_DITHER_NS = 64,
/// < not part of API/ABI
SWR_DITHER_NS_LIPSHITZ, SWR_DITHER_NS_F_WEIGHTED, SWR_DITHER_NS_MODIFIED_E_WEIGHTED, SWR_DITHER_NS_IMPROVED_E_WEIGHTED,
SWR_DITHER_NS_SHIBATA, SWR_DITHER_NS_LOW_SHIBATA, SWR_DITHER_NS_HIGH_SHIBATA, SWR_DITHER_NB
/// < not part of API/ABI
);
(* * Resampling Engines *)
SwrEngine = ( //
SWR_ENGINE_SWR, (* *< SW Resampler *)
SWR_ENGINE_SOXR, (* *< SoX Resampler *)
SWR_ENGINE_NB
/// < not part of API/ABI
);
(* * Resampling Filter Types *)
SwrFilterType = ( //
SWR_FILTER_TYPE_CUBIC, (* *< Cubic *)
SWR_FILTER_TYPE_BLACKMAN_NUTTALL, (* *< Blackman Nuttall windowed sinc *)
SWR_FILTER_TYPE_KAISER (* *< Kaiser windowed sinc *)
);
type
(* *
* The libswresample context. Unlike libavcodec and libavformat, this structure
* is opaque. This means that if you would like to set options, you must use
* the @ref avoptions API and cannot directly set values to members of the
* structure.
*)
// Declared as an empty record because the layout is private to the
// library; only ever use it through pSwrContext handles returned by
// swr_alloc()/swr_alloc_set_opts().
pSwrContext = ^SwrContext;
SwrContext = record
end;
(* *
* Get the AVClass for SwrContext. It can be used in combination with
* AV_OPT_SEARCH_FAKE_OBJ for examining options.
*
* @see av_opt_find().
* @return the AVClass of SwrContext
*)
// const AVClass *swr_get_class(void);
function swr_get_class(): pAVClass; cdecl; external swresample_dll;
(* *
* @name SwrContext constructor functions
* @{
*)
(* *
* Allocate SwrContext.
*
* If you use this function you will need to set the parameters (manually or
* with swr_alloc_set_opts()) before calling swr_init().
*
* @see swr_alloc_set_opts(), swr_init(), swr_free()
* @return NULL on error, allocated context otherwise
*)
// struct SwrContext *swr_alloc(void);
function swr_alloc(): pSwrContext; cdecl; external swresample_dll;
(* *
* Initialize context after user parameters have been set.
* @note The context must be configured using the AVOption API.
*
* @see av_opt_set_int()
* @see av_opt_set_dict()
*
* @param[in,out] s Swr context to initialize
* @return AVERROR error code in case of failure.
*)
// int swr_init(struct SwrContext *s);
function swr_init(s: pSwrContext): int; cdecl; external swresample_dll;
(* *
* Check whether an swr context has been initialized or not.
*
* @param[in] s Swr context to check
* @see swr_init()
* @return positive if it has been initialized, 0 if not initialized
*)
// int swr_is_initialized(struct SwrContext *s);
function swr_is_initialized(s: pSwrContext): int; cdecl; external swresample_dll;
(* *
* Allocate SwrContext if needed and set/reset common parameters.
*
* This function does not require s to be allocated with swr_alloc(). On the
* other hand, swr_alloc() can use swr_alloc_set_opts() to set the parameters
* on the allocated context.
*
* @param s existing Swr context if available, or NULL if not
* @param out_ch_layout output channel layout (AV_CH_LAYOUT_* )
* @param out_sample_fmt output sample format (AV_SAMPLE_FMT_* ).
* @param out_sample_rate output sample rate (frequency in Hz)
* @param in_ch_layout input channel layout (AV_CH_LAYOUT_* )
* @param in_sample_fmt input sample format (AV_SAMPLE_FMT_* ).
* @param in_sample_rate input sample rate (frequency in Hz)
* @param log_offset logging level offset
* @param log_ctx parent logging context, can be NULL
*
* @see swr_init(), swr_free()
* @return NULL on error, allocated context otherwise
*)
// struct SwrContext *swr_alloc_set_opts(struct SwrContext *s,
// int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate,
// int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate,
// int log_offset, void *log_ctx);
function swr_alloc_set_opts(s: pSwrContext; out_ch_layout: int64_t; out_sample_fmt: AVSampleFormat; out_sample_rate: int;
in_ch_layout: int64_t; in_sample_fmt: AVSampleFormat; in_sample_rate: int; log_offset: int; log_ctx: Pointer): pSwrContext; cdecl;
external swresample_dll;
(* *
* @}
*
* @name SwrContext destructor functions
* @{
*)
(* *
* Free the given SwrContext and set the pointer to NULL.
*
* @param[in] s a pointer to a pointer to Swr context
*)
// void swr_free(struct SwrContext **s);
procedure swr_free(var s: pSwrContext); cdecl; external swresample_dll;
(* *
* Closes the context so that swr_is_initialized() returns 0.
*
* The context can be brought back to life by running swr_init(),
* swr_init() can also be used without swr_close().
* This function is mainly provided for simplifying the usecase
* where one tries to support libavresample and libswresample.
*
* @param[in,out] s Swr context to be closed
*)
// void swr_close(struct SwrContext *s);
procedure swr_close(s: pSwrContext); cdecl; external swresample_dll;
(* *
* @}
*
* @name Core conversion functions
* @{
*)
(* * Convert audio.
*
* in and in_count can be set to 0 to flush the last few samples out at the
* end.
*
* If more input is provided than output space, then the input will be buffered.
* You can avoid this buffering by using swr_get_out_samples() to retrieve an
* upper bound on the required number of output samples for the given number of
* input samples. Conversion will run directly without copying whenever possible.
*
* @param s allocated Swr context, with parameters set
* @param out output buffers, only the first one need be set in case of packed audio
* @param out_count amount of space available for output in samples per channel
* @param in input buffers, only the first one need to be set in case of packed audio
* @param in_count number of input samples available in one channel
*
* @return number of samples output per channel, negative value on error
*)
// int swr_convert(struct SwrContext *s, uint8_t **out, int out_count,
// const uint8_t **in , int in_count);
// Note: 'out' and 'in' are reserved words in Pascal, hence '_out'/'_in'.
function swr_convert(s: pSwrContext; _out: ppuint8_t; out_count: int; const _in: ppuint8_t; in_count: int): int; cdecl; external swresample_dll;
(* *
* Convert the next timestamp from input to output
* timestamps are in 1/(in_sample_rate * out_sample_rate) units.
*
* @note There are 2 slightly differently behaving modes.
* @li When automatic timestamp compensation is not used, (min_compensation >= FLT_MAX)
* in this case timestamps will be passed through with delays compensated
* @li When automatic timestamp compensation is used, (min_compensation < FLT_MAX)
* in this case the output timestamps will match output sample numbers.
* See ffmpeg-resampler(1) for the two modes of compensation.
*
* @param s[in] initialized Swr context
* @param pts[in] timestamp for the next input sample, INT64_MIN if unknown
* @see swr_set_compensation(), swr_drop_output(), and swr_inject_silence() are
* function used internally for timestamp compensation.
* @return the output timestamp for the next output sample
*)
// int64_t swr_next_pts(struct SwrContext *s, int64_t pts);
function swr_next_pts(s: pSwrContext; pts: int64_t): int64_t; cdecl; external swresample_dll;
(* *
* @}
*
* @name Low-level option setting functions
* These functions provide a means to set low-level options that is not possible
* with the AVOption API.
* @{
*)
(* *
* Activate resampling compensation ("soft" compensation). This function is
* internally called when needed in swr_next_pts().
*
* @param[in,out] s allocated Swr context. If it is not initialized,
* or SWR_FLAG_RESAMPLE is not set, swr_init() is
* called with the flag set.
* @param[in] sample_delta delta in PTS per sample
* @param[in] compensation_distance number of samples to compensate for
* @return >= 0 on success, AVERROR error codes if:
* @li @c s is NULL,
* @li @c compensation_distance is less than 0,
* @li @c compensation_distance is 0 but sample_delta is not,
* @li compensation unsupported by resampler, or
* @li swr_init() fails when called.
*)
// int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance);
function swr_set_compensation(s: pSwrContext; sample_delta: int; compensation_distance: int): int; cdecl; external swresample_dll;
(* *
* Set a customized input channel mapping.
*
* @param[in,out] s allocated Swr context, not yet initialized
* @param[in] channel_map customized input channel mapping (array of channel
* indexes, -1 for a muted channel)
* @return >= 0 on success, or AVERROR error code in case of failure.
*)
// int swr_set_channel_mapping(struct SwrContext *s, const int *channel_map);
function swr_set_channel_mapping(s: pSwrContext; const channel_map: pint): int; cdecl; external swresample_dll;
(* *
* Generate a channel mixing matrix.
*
* This function is the one used internally by libswresample for building the
* default mixing matrix. It is made public just as a utility function for
* building custom matrices.
*
* @param in_layout input channel layout
* @param out_layout output channel layout
* @param center_mix_level mix level for the center channel
* @param surround_mix_level mix level for the surround channel(s)
* @param lfe_mix_level mix level for the low-frequency effects channel
* @param rematrix_maxval if 1.0, coefficients will be normalized to prevent
* overflow. if INT_MAX, coefficients will not be
* normalized.
* @param[out] matrix mixing coefficients; matrix[i + stride * o] is
* the weight of input channel i in output channel o.
* @param stride distance between adjacent input channels in the
* matrix array
* @param matrix_encoding matrixed stereo downmix mode (e.g. dplii)
* @param log_ctx parent logging context, can be NULL
* @return 0 on success, negative AVERROR code on failure
*)
// int swr_build_matrix(uint64_t in_layout, uint64_t out_layout,
// double center_mix_level, double surround_mix_level,
// double lfe_mix_level, double rematrix_maxval,
// double rematrix_volume, double *matrix,
// int stride, enum AVMatrixEncoding matrix_encoding,
// void *log_ctx);
// NOTE(review): 'var matrix: double' maps to the C 'double *matrix' output
// array - pass the FIRST element of a caller-allocated array large enough
// for matrix[i + stride * o] (see the C prototype above); confirm sizing
// against swresample.h before use.
function swr_build_matrix(in_layout: uint64_t; out_layout: uint64_t; center_mix_level: double; surround_mix_level: double;
lfe_mix_level: double; rematrix_maxval: double; rematrix_volume: double; var matrix: double; stride: int;
matrix_encoding: AVMatrixEncoding; log_ctx: Pointer): int; cdecl; external swresample_dll;
(* *
* Set a customized remix matrix.
*
* @param s allocated Swr context, not yet initialized
* @param matrix remix coefficients; matrix[i + stride * o] is
* the weight of input channel i in output channel o
* @param stride offset between lines of the matrix
* @return >= 0 on success, or AVERROR error code in case of failure.
*)
// int swr_set_matrix(struct SwrContext *s, const double *matrix, int stride);
function swr_set_matrix(s: pSwrContext; const matrix: pdouble; stride: int): int; cdecl; external swresample_dll;
(* *
* @}
*
* @name Sample handling functions
* @{
*)
(* *
* Drops the specified number of output samples.
*
* This function, along with swr_inject_silence(), is called by swr_next_pts()
* if needed for "hard" compensation.
*
* @param s allocated Swr context
* @param count number of samples to be dropped
*
* @return >= 0 on success, or a negative AVERROR code on failure
*)
// int swr_drop_output(struct SwrContext *s, int count);
function swr_drop_output(s: pSwrContext; count: int): int; cdecl; external swresample_dll;
(* *
* Injects the specified number of silence samples.
*
* This function, along with swr_drop_output(), is called by swr_next_pts()
* if needed for "hard" compensation.
*
* @param s allocated Swr context
* @param count number of samples to be dropped
*
* @return >= 0 on success, or a negative AVERROR code on failure
*)
// int swr_inject_silence(struct SwrContext *s, int count);
function swr_inject_silence(s: pSwrContext; count: int): int; cdecl; external swresample_dll;
(* *
* Gets the delay the next input sample will experience relative to the next output sample.
*
* Swresample can buffer data if more input has been provided than available
* output space, also converting between sample rates needs a delay.
* This function returns the sum of all such delays.
* The exact delay is not necessarily an integer value in either input or
* output sample rate. Especially when downsampling by a large value, the
* output sample rate may be a poor choice to represent the delay, similarly
* for upsampling and the input sample rate.
*
* @param s swr context
* @param base timebase in which the returned delay will be:
* @li if it's set to 1 the returned delay is in seconds
* @li if it's set to 1000 the returned delay is in milliseconds
* @li if it's set to the input sample rate then the returned
* delay is in input samples
* @li if it's set to the output sample rate then the returned
* delay is in output samples
* @li if it's the least common multiple of in_sample_rate and
* out_sample_rate then an exact rounding-free delay will be
* returned
* @returns the delay in 1 / @c base units.
*)
// int64_t swr_get_delay(struct SwrContext *s, int64_t base);
function swr_get_delay(s: pSwrContext; base: int64_t): int64_t; cdecl; external swresample_dll;
(* *
* Find an upper bound on the number of samples that the next swr_convert
* call will output, if called with in_samples of input samples. This
* depends on the internal state, and anything changing the internal state
* (like further swr_convert() calls) may change the number of samples
* swr_get_out_samples() returns for the same number of input samples.
*
* @param in_samples number of input samples.
* @note any call to swr_inject_silence(), swr_convert(), swr_next_pts()
* or swr_set_compensation() invalidates this limit
* @note it is recommended to pass the correct available buffer size
* to all functions like swr_convert() even if swr_get_out_samples()
* indicates that less would be used.
* @returns an upper bound on the number of samples that the next swr_convert
* will output or a negative value to indicate an error
*)
// int swr_get_out_samples(struct SwrContext *s, int in_samples);
function swr_get_out_samples(s: pSwrContext; in_samples: int): int; cdecl; external swresample_dll;
(* *
* @}
*
* @name Configuration accessors
* @{
*)
(* *
* Return the @ref LIBSWRESAMPLE_VERSION_INT constant.
*
* This is useful to check if the build-time libswresample has the same version
* as the run-time one.
*
* @returns the unsigned int-typed version
*)
// unsigned swresample_version(void);
function swresample_version(): unsigned; cdecl; external swresample_dll;
(* *
* Return the swr build-time configuration.
*
* @returns the build-time @c ./configure flags
*)
// const char *swresample_configuration(void);
function swresample_configuration(): pAnsiChar; cdecl; external swresample_dll;
(* *
* Return the swr license.
*
* @returns the license of libswresample, determined at build-time
*)
// const char *swresample_license(void);
function swresample_license(): pAnsiChar; cdecl; external swresample_dll;
(* *
* @}
*
* @name AVFrame based API
* @{
*)
(* *
* Convert the samples in the input AVFrame and write them to the output AVFrame.
*
* Input and output AVFrames must have channel_layout, sample_rate and format set.
*
* If the output AVFrame does not have the data pointers allocated the nb_samples
* field will be set using av_frame_get_buffer()
* is called to allocate the frame.
*
* The output AVFrame can be NULL or have fewer allocated samples than required.
* In this case, any remaining samples not written to the output will be added
* to an internal FIFO buffer, to be returned at the next call to this function
* or to swr_convert().
*
* If converting sample rate, there may be data remaining in the internal
* resampling delay buffer. swr_get_delay() tells the number of
* remaining samples. To get this data as output, call this function or
* swr_convert() with NULL input.
*
* If the SwrContext configuration does not match the output and
* input AVFrame settings the conversion does not take place and depending on
* which AVFrame is not matching AVERROR_OUTPUT_CHANGED, AVERROR_INPUT_CHANGED
* or the result of a bitwise-OR of them is returned.
*
* @see swr_delay()
* @see swr_convert()
* @see swr_get_delay()
*
* @param swr audio resample context
* @param output output AVFrame
* @param input input AVFrame
* @return 0 on success, AVERROR on failure or nonmatching
* configuration.
*)
// int swr_convert_frame(SwrContext *swr, AVFrame *output, const AVFrame *input);
function swr_convert_frame(swr: pSwrContext; output: pAVFrame; const input: pAVFrame): int; cdecl; external swresample_dll;
(* *
* Configure or reconfigure the SwrContext using the information
* provided by the AVFrames.
*
* The original resampling context is reset even on failure.
* The function calls swr_close() internally if the context is open.
*
* @see swr_close();
*
* @param swr audio resample context
* @param output output AVFrame
* @param input input AVFrame
* @return 0 on success, AVERROR on failure.
*)
// int swr_config_frame(SwrContext *swr, const AVFrame *out, const AVFrame *in);
function swr_config_frame(swr: pSwrContext; const _out: pAVFrame; const _in: pAVFrame): int; cdecl; external swresample_dll;
implementation
end.

372
ffmpeg/libswscale.pas Normal file
View File

@ -0,0 +1,372 @@
unit libswscale;
{$IFDEF FPC}
{$MODE Delphi}
{$ENDIF}
interface
Uses
ffmpeg_types, libavutil;
{$I ffmpeg.inc}
(*
* @defgroup libsws libswscale
* Color conversion and scaling library.
*
* @{
*
* Return the LIBSWSCALE_VERSION_INT constant.
*)
// unsigned swscale_version(void);
function swscale_version(): unsigned; cdecl; external swscale_dll;
(*
* Return the libswscale build-time configuration.
*)
// const char *swscale_configuration(void);
function swscale_configuration(): pAnsiChar; cdecl; external swscale_dll;
(*
* Return the libswscale license.
*)
// const char *swscale_license(void);
function swscale_license(): pAnsiChar; cdecl; external swscale_dll;
const
(* values for the flags, the stuff on the command line is different *)
SWS_FAST_BILINEAR = 1;
SWS_BILINEAR = 2;
SWS_BICUBIC = 4;
SWS_X = 8;
SWS_POINT = $10;
SWS_AREA = $20;
SWS_BICUBLIN = $40;
SWS_GAUSS = $80;
SWS_SINC = $100;
SWS_LANCZOS = $200;
SWS_SPLINE = $400;
SWS_SRC_V_CHR_DROP_MASK = $30000;
SWS_SRC_V_CHR_DROP_SHIFT = 16;
SWS_PARAM_DEFAULT = 123456;
SWS_PRINT_INFO = $1000;
// the following 3 flags are not completely implemented
// internal chrominance subsampling info
SWS_FULL_CHR_H_INT = $2000;
// input subsampling info
SWS_FULL_CHR_H_INP = $4000;
SWS_DIRECT_BGR = $8000;
SWS_ACCURATE_RND = $40000;
SWS_BITEXACT = $80000;
SWS_ERROR_DIFFUSION = $800000;
SWS_MAX_REDUCE_CUTOFF = 0.002;
SWS_CS_ITU709 = 1;
SWS_CS_FCC = 4;
SWS_CS_ITU601 = 5;
SWS_CS_ITU624 = 5;
SWS_CS_SMPTE170M = 5;
SWS_CS_SMPTE240M = 7;
SWS_CS_DEFAULT = 5;
SWS_CS_BT2020 = 9;
(*
* Return a pointer to yuv<->rgb coefficients for the given colorspace
* suitable for sws_setColorspaceDetails().
*
* @param colorspace One of the SWS_CS_* macros. If invalid,
* SWS_CS_DEFAULT is used.
*)
// const int *sws_getCoefficients(int colorspace);
function sws_getCoefficients(colorspace: int): pInt; cdecl; external swscale_dll;
// when used for filters they must have an odd number of elements
// coeffs cannot be shared between vectors
type
SwsVector = record
coeff: pdouble;
/// < pointer to the list of coefficients
length: int;
/// < number of coefficients in the vector
end;
pSwsVector = ^SwsVector;
// vectors can be shared
SwsFilter = record
lumH: pSwsVector;
lumV: pSwsVector;
chrH: pSwsVector;
chrV: pSwsVector;
End;
pSwsFilter = ^SwsFilter;
SwsContext = record
end;
pSwsContext = ^SwsContext;
Tsws_array_uint8_t = array_uint8_t;
psws_array_uint8_t = ^Tsws_array_uint8_t;
Tsws_array_int = array_int;
psws_array_int = ^Tsws_array_int;
Tsws_array4_int = array4_int;
psws_array4_int = ^Tsws_array4_int;
(*
* Return a positive value if pix_fmt is a supported input format, 0
* otherwise.
*)
// int sws_isSupportedInput(enum AVPixelFormat pix_fmt);
function sws_isSupportedInput(pix_fmt: AVPixelFormat): int; cdecl; external swscale_dll;
(*
* Return a positive value if pix_fmt is a supported output format, 0
* otherwise.
*)
// int sws_isSupportedOutput(enum AVPixelFormat pix_fmt);
function sws_isSupportedOutput(pix_fmt: AVPixelFormat): int; cdecl; external swscale_dll;
(*
* @param[in] pix_fmt the pixel format
* @return a positive value if an endianness conversion for pix_fmt is
* supported, 0 otherwise.
*)
// int sws_isSupportedEndiannessConversion(enum AVPixelFormat pix_fmt);
function sws_isSupportedEndiannessConversion(pix_fmt: AVPixelFormat): int; cdecl; external swscale_dll;
(*
* Allocate an empty SwsContext. This must be filled and passed to
* sws_init_context(). For filling see AVOptions, options.c and
* sws_setColorspaceDetails().
*)
// struct SwsContext *sws_alloc_context(void);
function sws_alloc_context(): pSwsContext; cdecl; external swscale_dll;
(*
* Initialize the swscaler context sws_context.
*
* @return zero or positive value on success, a negative value on
* error
*)
// av_warn_unused_result
// int sws_init_context(struct SwsContext *sws_context, SwsFilter *srcFilter, SwsFilter *dstFilter);
function sws_init_context(sws_context: pSwsContext; srcFilter: pSwsFilter; dstFilter: pSwsFilter): int; cdecl; external swscale_dll;
(*
* Free the swscaler context swsContext.
* If swsContext is NULL, then does nothing.
*)
// void sws_freeContext(struct SwsContext *swsContext);
procedure sws_freeContext(SwsContext: pSwsContext); cdecl; external swscale_dll;
(*
* Allocate and return an SwsContext. You need it to perform
* scaling/conversion operations using sws_scale().
*
* @param srcW the width of the source image
* @param srcH the height of the source image
* @param srcFormat the source image format
* @param dstW the width of the destination image
* @param dstH the height of the destination image
* @param dstFormat the destination image format
* @param flags specify which algorithm and options to use for rescaling
* @param param extra parameters to tune the used scaler
* For SWS_BICUBIC param[0] and [1] tune the shape of the basis
* function, param[0] tunes f(1) and param[1] f´(1)
* For SWS_GAUSS param[0] tunes the exponent and thus cutoff
* frequency
* For SWS_LANCZOS param[0] tunes the width of the window function
* @return a pointer to an allocated context, or NULL in case of error
* @note this function is to be removed after a saner alternative is
* written
*)
// struct SwsContext *sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat,
// int dstW, int dstH, enum AVPixelFormat dstFormat,
// int flags, SwsFilter *srcFilter,
// SwsFilter *dstFilter, const double *param);
function sws_getContext(srcW: int; srcH: int; srcFormat: AVPixelFormat; dstW: int; dstH: int; dstFormat: AVPixelFormat; flags: int; srcFilter: pSwsFilter;
dstFilter: pSwsFilter; const param: pdouble): pSwsContext; cdecl; external swscale_dll;
(*
* Scale the image slice in srcSlice and put the resulting scaled
* slice in the image in dst. A slice is a sequence of consecutive
* rows in an image.
*
* Slices have to be provided in sequential order, either in
* top-bottom or bottom-top order. If slices are provided in
* non-sequential order the behavior of the function is undefined.
*
* @param c the scaling context previously created with
* sws_getContext()
* @param srcSlice the array containing the pointers to the planes of
* the source slice
* @param srcStride the array containing the strides for each plane of
* the source image
* @param srcSliceY the position in the source image of the slice to
* process, that is the number (counted starting from
* zero) in the image of the first row of the slice
* @param srcSliceH the height of the source slice, that is the number
* of rows in the slice
* @param dst the array containing the pointers to the planes of
* the destination image
* @param dstStride the array containing the strides for each plane of
* the destination image
* @return the height of the output slice
*)
// int sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[],
//               const int srcStride[], int srcSliceY, int srcSliceH,
//               uint8_t *const dst[], const int dstStride[]);
function sws_scale(c: pSwsContext; const srcSlice: psws_array_uint8_t; const srcStride: psws_array_int; srcSliceY: int; srcSliceH: int; dst: psws_array_uint8_t;
  const dstStride: psws_array_int): int; cdecl; overload; external swscale_dll;
(*
 * @param dstRange flag indicating the white-black range of the output (1=jpeg / 0=mpeg)
 * @param srcRange flag indicating the white-black range of the input (1=jpeg / 0=mpeg)
 * @param table the yuv2rgb coefficients describing the output yuv space, normally ff_yuv2rgb_coeffs[x]
 * @param inv_table the yuv2rgb coefficients describing the input yuv space, normally ff_yuv2rgb_coeffs[x]
 * @param brightness 16.16 fixed point brightness correction
 * @param contrast 16.16 fixed point contrast correction
 * @param saturation 16.16 fixed point saturation correction
 * @return -1 if not supported
 *)
// int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4],
//                              int srcRange, const int table[4], int dstRange,
//                              int brightness, int contrast, int saturation);
function sws_setColorspaceDetails(c: pSwsContext; const inv_table: psws_array4_int; srcRange: int; const table: psws_array4_int; dstRange: int; brightness: int;
  contrast: int; saturation: int): int; cdecl; external swscale_dll;
(*
 * @return -1 if not supported
 *)
// int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table,
//                              int *srcRange, int **table, int *dstRange,
//                              int *brightness, int *contrast, int *saturation);
// NOTE: C `int **`/`int *` out-parameters are mapped to Pascal `var` parameters.
function sws_getColorspaceDetails(c: pSwsContext; var inv_table: pInt; var srcRange: int; var table: pInt; var dstRange: int; var brightness: int;
  var contrast: int; var saturation: int): int; cdecl; external swscale_dll;
(*
 * Allocate and return an uninitialized vector with length coefficients.
 *)
// SwsVector *sws_allocVec(int length);
function sws_allocVec(length: int): pSwsVector; cdecl; external swscale_dll;
(*
 * Return a normalized Gaussian curve used to filter stuff
 * quality = 3 is high quality, lower is lower quality.
 *)
// SwsVector *sws_getGaussianVec(double variance, double quality);
function sws_getGaussianVec(variance: double; quality: double): pSwsVector; cdecl; external swscale_dll;
(*
 * Scale all the coefficients of a by the scalar value.
 *)
// void sws_scaleVec(SwsVector *a, double scalar);
procedure sws_scaleVec(a: pSwsVector; scalar: double); cdecl; external swscale_dll;
(*
 * Scale all the coefficients of a so that their sum equals height.
 *)
// void sws_normalizeVec(SwsVector *a, double height);
procedure sws_normalizeVec(a: pSwsVector; height: double); cdecl; external swscale_dll;
{$IFDEF FF_API_SWS_VECTOR}
// Deprecated upstream (attribute_deprecated in swscale.h); kept for ABI parity.
// attribute_deprecated SwsVector *sws_getConstVec(double c, int length);
function sws_getConstVec(c: double; length: int): pSwsVector; cdecl; external swscale_dll;
// attribute_deprecated SwsVector *sws_getIdentityVec(void);
function sws_getIdentityVec(): pSwsVector; cdecl; external swscale_dll;
// attribute_deprecated void sws_convVec(SwsVector *a, SwsVector *b);
procedure sws_convVec(a: pSwsVector; b: pSwsVector); cdecl; external swscale_dll;
// attribute_deprecated void sws_addVec(SwsVector *a, SwsVector *b);
procedure sws_addVec(a: pSwsVector; b: pSwsVector); cdecl; external swscale_dll;
// attribute_deprecated void sws_subVec(SwsVector *a, SwsVector *b);
procedure sws_subVec(a: pSwsVector; b: pSwsVector); cdecl; external swscale_dll;
// attribute_deprecated void sws_shiftVec(SwsVector *a, int shift);
procedure sws_shiftVec(a: pSwsVector; shift: int); cdecl; external swscale_dll;
// attribute_deprecated SwsVector *sws_cloneVec(SwsVector *a);
function sws_cloneVec(a: pSwsVector): pSwsVector; cdecl; external swscale_dll;
// attribute_deprecated void sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level);
procedure sws_printVec2(a: pSwsVector; log_ctx: pAVClass; log_level: int); cdecl; external swscale_dll;
{$ENDIF}
// void sws_freeVec(SwsVector *a);
procedure sws_freeVec(a: pSwsVector); cdecl; external swscale_dll;
// SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,
//                                 float lumaSharpen, float chromaSharpen,
//                                 float chromaHShift, float chromaVShift,
//                                 int verbose);
function sws_getDefaultFilter(lumaGBlur: float; chromaGBlur: float; lumaSharpen: float; chromaSharpen: float; chromaHShift: float; chromaVShift: float;
  verbose: int): pSwsFilter; cdecl; external swscale_dll;
// void sws_freeFilter(SwsFilter *filter);
procedure sws_freeFilter(filter: pSwsFilter); cdecl; external swscale_dll;
(*
 * Check if context can be reused, otherwise reallocate a new one.
 *
 * If context is NULL, just calls sws_getContext() to get a new
 * context. Otherwise, checks if the parameters are the ones already
 * saved in context. If that is the case, returns the current
 * context. Otherwise, frees context and gets a new context with
 * the new parameters.
 *
 * Be warned that srcFilter and dstFilter are not checked, they
 * are assumed to remain the same.
 *)
// struct SwsContext *sws_getCachedContext(struct SwsContext *context,
//                                         int srcW, int srcH, enum AVPixelFormat srcFormat,
//                                         int dstW, int dstH, enum AVPixelFormat dstFormat,
//                                         int flags, SwsFilter *srcFilter,
//                                         SwsFilter *dstFilter, const double *param);
function sws_getCachedContext(context: pSwsContext; srcW: int; srcH: int; srcFormat: AVPixelFormat; dstW: int; dstH: int; dstFormat: AVPixelFormat; flags: int;
  srcFilter: pSwsFilter; dstFilter: pSwsFilter; const param: pdouble): pSwsContext; cdecl; external swscale_dll;
(*
 * Convert an 8-bit paletted frame into a frame with a color depth of 32 bits.
 *
 * The output frame will have the same packed format as the palette.
 *
 * @param src        source frame buffer
 * @param dst        destination frame buffer (C `uint8_t *dst`, passed by reference here)
 * @param num_pixels number of pixels to convert
 * @param palette    array with [256] entries, which must match color arrangement (RGB or BGR) of src
 *)
// void sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette);
procedure sws_convertPalette8ToPacked32(const src: puint8_t; var dst: uint8_t; num_pixels: int; const palette: puint8_t); cdecl; external swscale_dll;
(*
 * Convert an 8-bit paletted frame into a frame with a color depth of 24 bits.
 *
 * With the palette format "ABCD", the destination frame ends up with the format "ABC".
 *
 * @param src        source frame buffer
 * @param dst        destination frame buffer (C `uint8_t *dst`, passed by reference here)
 * @param num_pixels number of pixels to convert
 * @param palette    array with [256] entries, which must match color arrangement (RGB or BGR) of src
 *)
// void sws_convertPalette8ToPacked24(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette);
procedure sws_convertPalette8ToPacked24(const src: puint8_t; var dst: uint8_t; num_pixels: int; const palette: puint8_t); cdecl; external swscale_dll;
(*
 * Get the AVClass for swsContext. It can be used in combination with
 * AV_OPT_SEARCH_FAKE_OBJ for examining options.
 *
 * @see av_opt_find().
 *)
// const AVClass *sws_get_class(void);
function sws_get_class(): pAVClass; cdecl; external swscale_dll;
implementation
end.

View File

@ -31,7 +31,7 @@
<PackageName Value="LCL"/>
</Item1>
</RequiredPackages>
<Units Count="121">
<Units Count="122">
<Unit0>
<Filename Value="fpPS4.lpr"/>
<IsPartOfProject Value="True"/>
@ -553,6 +553,11 @@
<Filename Value="kernel\ps4_pthread_attr.pas"/>
<IsPartOfProject Value="True"/>
</Unit120>
<Unit121>
<Filename Value="src\ps4_libsceavplayer.pas"/>
<IsPartOfProject Value="True"/>
<UnitName Value="ps4_libSceAvPlayer"/>
</Unit121>
</Units>
</ProjectOptions>
<CompilerOptions>
@ -564,7 +569,7 @@
<SearchPaths>
<IncludeFiles Value="$(ProjOutDir);kernel;src\ajm;sys"/>
<Libraries Value="static"/>
<OtherUnitFiles Value="rtl;sys;vulkan;chip;spirv;kernel;src;src\ajm;src\audio;src\np;src\playgo;shaders;src\inputs;src\libcinternal"/>
<OtherUnitFiles Value="rtl;sys;vulkan;chip;spirv;kernel;src;src\ajm;src\audio;src\np;src\playgo;shaders;src\inputs;src\libcinternal;ffmpeg"/>
<UnitOutputDirectory Value="lib\$(TargetCPU)-$(TargetOS)"/>
</SearchPaths>
<Parsing>

View File

@ -43,6 +43,7 @@ uses
ps4_libSceAudioOut,
ps4_libSceVoice,
ps4_libSceVideoOut,
ps4_libSceAvPlayer,
ps4_libScePad,
ps4_libSceNpWebApi,
ps4_libSceRudp,

866
src/ps4_libsceavplayer.pas Normal file
View File

@ -0,0 +1,866 @@
unit ps4_libSceAvPlayer;
{$mode ObjFPC}{$H+}
interface
uses
libavcodec,
libavdevice,
libavformat,
libavutil,
libswscale,
libswresample,
windows,
ps4_program,
spinlock,
sys_signal,
sys_path,
sys_time,
sys_pthread,
Classes,
SysUtils,
Generics.Collections,
Math;
implementation
uses
sys_kernel;
const
LANGUAGE_CODE_ENG:array[0..3] of Char=('E', 'N', 'G', #0);
DIRECTORY_AVPLAYER_DUMP='avplayer_dump';
BUFFER_COUNT=2;
type
TAVPacketQueue=specialize TQueue<PAVPacket>;
SceAvPlayerAllocate=function(argP:Pointer;argAlignment:DWord;argSize:DWord):Pointer; SysV_ABI_CDecl;
SceAvPlayerDeallocate=procedure(argP:Pointer;argMemory:Pointer); SysV_ABI_CDecl;
SceAvPlayerAllocateTexture=function(argP:Pointer;argAlignment:DWord;argSize:DWord):Pointer; SysV_ABI_CDecl;
SceAvPlayerDeallocateTexture=procedure(argP:Pointer;argMemory:Pointer); SysV_ABI_CDecl;
SceAvPlayerOpenFile=function(argP:Pointer;argFilename:PChar):Integer; SysV_ABI_CDecl;
SceAvPlayerCloseFile=function(argP:Pointer):Integer; SysV_ABI_CDecl;
SceAvPlayerReadOffsetFile=function(argP:Pointer;argBuffer:PByte;argPosition:QWord;argLength:DWord):Integer; SysV_ABI_CDecl;
SceAvPlayerSizeFile=function(argP:Pointer):QWord; SysV_ABI_CDecl;
SceAvPlayerEventCallback=procedure(p:Pointer;argEventId:Integer;argSourceId:Integer;argEventData:Pointer); SysV_ABI_CDecl;
SceAvPlayerMemAllocator=packed record
objectPointer :Pointer;
allocate :SceAvPlayerAllocate;
deallocate :SceAvPlayerDeallocate;
allocateTexture :SceAvPlayerAllocateTexture;
deallocateTexture:SceAvPlayerDeallocateTexture;
end;
SceAvPlayerFileReplacement=packed record
objectPointer:Pointer;
open :SceAvPlayerOpenFile;
close :SceAvPlayerCloseFile;
readOffset :SceAvPlayerReadOffsetFile;
size :SceAvPlayerSizeFile;
end;
SceAvPlayerEventReplacement=packed record
objectPointer:Pointer;
eventCallback:SceAvPlayerEventCallback;
end;
SceAvPlayerInitData=packed record
memoryReplacement :SceAvPlayerMemAllocator;
fileReplacement :SceAvPlayerFileReplacement;
eventReplacement :SceAvPlayerEventReplacement;
debugLevel :DWord;
basePriority :DWord;
numOutputVideoFrameBuffers:Integer;
autoStart :Boolean;
reserved :array[0..2] of Byte;
defaultLanguage :PChar;
end;
PSceAvPlayerInitData=^SceAvPlayerInitData;
SceAvPlayerInitDataEx=packed record
thisSize :QWORD;
memoryReplacement :SceAvPlayerMemAllocator;
fileReplacement :SceAvPlayerFileReplacement;
eventReplacement :SceAvPlayerEventReplacement;
defaultLanguage :PChar;
debugLevel :DWORD; //SceAvPlayerDebuglevels
audioDecoderPriority :DWORD;
audioDecoderAffinity :DWORD;
videoDecoderPriority :DWORD;
videoDecoderAffinity :DWORD;
demuxerPriority :DWORD;
demuxerAffinity :DWORD;
controllerPriority :DWORD;
controllerAffinity :DWORD;
httpStreamingPriority :DWORD;
httpStreamingAffinity :DWORD;
fileStreamingPriority :DWORD;
fileStreamingAffinity :DWORD;
numOutputVideoFrameBuffers:Integer;
autoStart :Boolean;
reserved :array[0..2] of Byte;
_align :DWORD;
end;
PSceAvPlayerInitDataEx=^SceAvPlayerInitDataEx;
SceAvPlayerAudio=packed record
channelCount:Word;
reserved :array[0..1] of Byte;
sampleRate :DWord;
size :DWord;
languageCode:array[0..3] of Char;
end;
SceAvPlayerVideo=packed record
width :DWord;
height :DWord;
aspectRatio :Single;
languageCode:array[0..3] of Char;
end;
SceAvPlayerTextPosition=packed record
top :Word;
left :Word;
bottom:Word;
right :Word;
end;
SceAvPlayerTimedText=packed record
languageCode:array[0..3] of Char;
textSize :Word;
fontSize :Word;
position :SceAvPlayerTextPosition;
end;
SceAvPlayerStreamDetails=packed record
case byte of //union
0:(reserved:array[0..15] of Byte);
1:(audio :SceAvPlayerAudio );
2:(video :SceAvPlayerVideo );
3:(subs :SceAvPlayerTimedText);
end;
SceAvPlayerFrameInfo=packed record
pData :PByte;
reserved :DWORD;
_align :DWORD;
timeStamp:QWord; //The timestamp in ms
details :SceAvPlayerStreamDetails;
end;
PSceAvPlayerFrameInfo=^SceAvPlayerFrameInfo;
SceAvPlayerAudioEx=packed record
channelCount:Word;
reserved :array[0..1] of Byte;
sampleRate :DWord;
size :DWord;
languageCode:array[0..3] of Char;
reserved1 :array[0..63] of Byte;
end;
SceAvPlayerVideoEx=packed record
width :DWord;
height :DWord;
aspectRatio :Single;
languageCode :array[0..3] of Char;
framerate :DWord;
cropLeftOffset :DWord;
cropRightOffset :DWord;
cropTopOffset :DWord;
cropBottomOffset :DWord;
pitch :DWord;
lumaBitDepth :Byte;
chromaBitDepth :Byte;
videoFullRangeFlag:Boolean;
reserved :array[0..36] of Byte;
end;
SceAvPlayerTimedTextEx=packed record
languageCode:array[0..3] of Char;
reserved :array[0..75] of Byte;
end;
SceAvPlayerStreamDetailsEx=packed record
Case Byte of //union
0:(audio :SceAvPlayerAudioEx );
1:(video :SceAvPlayerVideoEx );
2:(subs :SceAvPlayerTimedTextEx);
3:(reserved:array[0..79] of Byte );
end;
SceAvPlayerFrameInfoEx=packed record
pData :PByte;
reserved :DWORD;
_align :DWORD;
timeStamp:QWord; //The timestamp in ms
details :SceAvPlayerStreamDetailsEx;
end;
PSceAvPlayerFrameInfoEx=^SceAvPlayerFrameInfoEx;
PSceAvPlayerPostInitData = Pointer;
TMemChunk=packed record
pData:Pointer;
fSize:Ptruint;
end;
TAvPlayerState=class
formatContext :PAVFormatContext;
audioCodecContext :PAVCodecContext;
videoCodecContext :PAVCodecContext;
audioPackets :TAVPacketQueue;
videoPackets :TAVPacketQueue;
lastTimeStamp :QWord;
audioBuffer :array[0..BUFFER_COUNT-1] of PSmallInt;
videoBuffer :array[0..BUFFER_COUNT-1] of PByte;
videoStreamId :Integer;
audioStreamId :Integer;
channelCount,
sampleCount,
sampleRate :Integer;
source :RawByteString; // TODO: "sceAvPlayerAddSource" indicates there may be more than 1 source per instance
info :Pointer; // Pointer to TAvPlayerInfo
constructor Create;
destructor Destroy; override;
procedure CreateMedia(const aSource: RawByteString);
procedure FreeMedia;
function NextPacket(const id:Integer):Boolean;
function ReceiveAudio:TMemChunk;
function ReceiveVideo:TMemChunk;
function GetFramerate:QWord;
function IsPlaying:Boolean;
function Buffer(const aType:DWord;const chunk:TMemChunk):Pointer;
end;
TAvPlayerInfo=record
playerState :TAvPlayerState;
//
isLooped :Boolean;
isPaused :Boolean;
lastFrameTime :QWord;
memoryReplacement:SceAvPlayerMemAllocator;
fileReplacement :SceAvPlayerFileReplacement;
eventReplacement :SceAvPlayerEventReplacement;
end;
PAvPlayerInfo=^TAvPlayerInfo;
// For now AvPlayer handle is pointer that points directly to player struct
SceAvPlayerHandle=PAvPlayerInfo;
var
lock:Pointer;
//Current monotonic time in microseconds (thin wrapper over the kernel helper).
function GetTimeInUs:QWord; inline;
begin
 GetTimeInUs:=SwGetTimeUsec;
end;
{ Set up an empty player state; -1 marks "no stream selected yet" for both
  the audio and the video stream index. }
constructor TAvPlayerState.Create;
begin
 inherited Create;
 audioStreamId:=-1;
 videoStreamId:=-1;
end;
{ Release any media still open before the object itself is destroyed. }
destructor TAvPlayerState.Destroy;
begin
 FreeMedia;
 inherited Destroy;
end;
{ Open aSource with libavformat, select the best audio/video streams and
  create + open a decoder context for each. Any previously opened media is
  released first.
  NOTE(review): return codes of avformat_open_input/avcodec_open2 are not
  checked; an unreadable file will fault on the formatContext^ deref below. }
procedure TAvPlayerState.CreateMedia(const aSource: RawByteString);
var
 videoCodec :PAVCodec;
 audioCodec :PAVCodec;
 videoStream:PAVStream;
 audioStream:PAVStream;
 p          :Pointer;  //receives the decoder from av_find_best_stream; unused here
begin
 FreeMedia;
 source:=aSource;
 formatContext:=avformat_alloc_context;
 avformat_open_input(formatContext,PChar(source),nil,ppAVDictionary(nil));
 Writeln(SysLogPrefix,source);
 Writeln(SysLogPrefix,Format('Format: %s, duration: %dms',[formatContext^.iformat^.long_name,formatContext^.duration div 1000]));
 // Print some useful information about media
 videoStreamId:=av_find_best_stream(formatContext,AVMEDIA_TYPE_VIDEO,-1,-1,p,0);
 audioStreamId:=av_find_best_stream(formatContext,AVMEDIA_TYPE_AUDIO,-1,-1,p,0);
 if videoStreamId>=0 then
 begin
  videoStream:=formatContext^.streams[videoStreamId];
  videoCodec:=avcodec_find_decoder(videoStream^.codecpar^.codec_id);
  videoCodecContext:=avcodec_alloc_context3(videoCodec);
  avcodec_parameters_to_context(videoCodecContext,videoStream^.codecpar);
  avcodec_open2(videoCodecContext,videoCodec,nil);
  Writeln(SysLogPrefix,Format('%d) Video codec: %s, resolution: %d x %d',[videoStreamId,videoCodec^.long_name,videoStream^.codecpar^.width,videoStream^.codecpar^.height]));
 end;
 if audioStreamId>=0 then
 begin
  audioStream:=formatContext^.streams[audioStreamId];
  audioCodec:=avcodec_find_decoder(audioStream^.codecpar^.codec_id);
  audioCodecContext:=avcodec_alloc_context3(audioCodec);
  avcodec_parameters_to_context(audioCodecContext,audioStream^.codecpar);
  avcodec_open2(audioCodecContext,audioCodec,nil);
  Writeln(SysLogPrefix,Format('%d) Audio codec: %s, channels: %d, sample rate: %d',[audioStreamId,audioCodec^.long_name,audioStream^.codecpar^.channels,audioStream^.codecpar^.sample_rate]));
 end;
 audioPackets:=TAVPacketQueue.Create;
 videoPackets:=TAVPacketQueue.Create;
end;
{ Release all ffmpeg state and output buffers owned by this player.
  Safe to call repeatedly: formatContext=nil means "nothing open".
  Audio buffers are plain heap memory; video buffers were allocated through
  the guest's texture allocator and must be returned through it. }
procedure TAvPlayerState.FreeMedia;
var
 packet    :PAVPacket;
 I         :Integer;
 playerInfo:PAvPlayerInfo;
begin
 if formatContext=nil then
  Exit;
 playerInfo:=info;
 while audioPackets.Count>0 do
 begin
  packet:=audioPackets.Dequeue;
  av_packet_free(packet);
 end;
 while videoPackets.Count>0 do
 begin
  packet:=videoPackets.Dequeue;
  av_packet_free(packet);
 end;
 audioPackets.Free;
 videoPackets.Free;
 audioPackets:=nil; //drop the dead references so a later CreateMedia starts clean
 videoPackets:=nil;
 // BUGFIX: the nil-checks were swapped (videoCodecContext was tested but the
 // *audio* context was closed, and vice versa) - this crashed whenever only
 // one of the two streams existed. Each context now guards itself.
 if audioCodecContext<>nil then
 begin
  avcodec_close(audioCodecContext);
  avcodec_free_context(audioCodecContext);
  audioCodecContext:=nil;
 end;
 if videoCodecContext<>nil then
 begin
  avcodec_close(videoCodecContext);
  avcodec_free_context(videoCodecContext);
  videoCodecContext:=nil;
 end;
 avformat_close_input(formatContext);
 for I:=0 to BUFFER_COUNT-1 do
 begin
  if audioBuffer[I]<>nil then
  begin
   FreeMem(audioBuffer[I]);
   audioBuffer[I]:=nil; // BUGFIX: was left dangling -> double free in Buffer() after a reopen
  end;
  if videoBuffer[I]<>nil then
  begin
   playerInfo^.memoryReplacement.deallocateTexture(playerInfo^.memoryReplacement.objectPointer,videoBuffer[I]);
   videoBuffer[I]:=nil; // BUGFIX: same dangling-pointer issue as the audio buffer
  end;
 end;
 source:='';
 formatContext:=nil;
end;
{ Pull packets from the demuxer until one for stream `id` is available, then
  feed it to the matching decoder. Packets belonging to the other stream are
  parked on that stream's queue instead of being dropped.
  Returns False when the demuxer has no more packets (end of stream). }
function TAvPlayerState.NextPacket(const id:Integer):Boolean;
var
 wanted,other:TAvPacketQueue;
 ctx         :PAVCodecContext;
 packet      :PAVPacket;
 err         :Integer;
begin
 //Bind the queue/decoder pair for the requested stream up front.
 if id=videoStreamId then
 begin
  wanted:=videoPackets;
  other :=audioPackets;
  ctx   :=videoCodecContext;
 end else
 begin
  wanted:=audioPackets;
  other :=videoPackets;
  ctx   :=audioCodecContext;
 end;
 repeat
  if wanted.Count>0 then
  begin
   packet:=wanted.Dequeue;
   err:=avcodec_send_packet(ctx,packet);
   assert(err=0);
   av_packet_free(packet);
   Exit(True);
  end;
  //Queue is empty: demux one more packet and sort it onto the right queue.
  packet:=av_packet_alloc;
  if av_read_frame(formatContext,packet)<>0 then
   Exit(False);
  if packet^.stream_index=id then
   wanted.Enqueue(packet)
  else
   other.Enqueue(packet);
 until False;
end;
{ Decode the next audio frame and convert planar float samples (FLTP) to
  interleaved signed 16-bit PCM.
  Returns a heap buffer in Result.pData (ownership passes to the caller,
  normally recycled through Buffer(0,...)), or pData=nil on end of
  stream/decoder failure (playback is then stopped by clearing `source`). }
function TAvPlayerState.ReceiveAudio:TMemChunk;
var
 err      :Integer;
 frame    :PAVFrame;
 i, j     :Integer;
 fdata    :PSingle;
 pcmSample:SmallInt;
begin
 Result:=Default(TMemChunk);
 if (audioStreamId<0) or (not IsPlaying) then Exit;
 frame:=av_frame_alloc;
 Result.pData:=nil;
 while True do
 begin
  err:=avcodec_receive_frame(audioCodecContext,frame);
  if (err=AVERROR_EAGAIN) and (NextPacket(audioStreamId)) then
   continue; //decoder needs more input and we still have packets
  if err<>0 then
  begin
   source:=''; //drained or failed -> mark playback finished
   break;
  end;
  //
  if frame^.format<>Integer(AV_SAMPLE_FMT_FLTP) then
   Writeln('Unknown audio format: ',frame^.format); //data below is still read as FLTP
  channelCount:=frame^.channels;
  sampleCount:=frame^.nb_samples;
  sampleRate:=frame^.sample_rate;
  Result.fSize:=sampleCount*channelCount*SizeOf(SmallInt);
  GetMem(Result.pData,Result.fSize);
  for i:=0 to sampleCount-1 do
   for j:=0 to channelCount-1 do
   begin
    fdata:=PSingle(frame^.data[j]);
    // BUGFIX: clamp to [-1,1] before scaling; out-of-range (clipped) float
    // samples used to overflow the SmallInt assignment.
    pcmSample:=Floor(EnsureRange(fdata[i],-1.0,1.0)*High(SmallInt));
    PSmallInt(Result.pData)[i*channelCount+j]:=pcmSample;
   end;
  break;
 end;
 av_frame_free(frame);
end;
{ Decode the next video frame and pack its three planes into one contiguous
  buffer: full-resolution luma followed by the two half-resolution chroma
  planes (per-row linesize padding is stripped).
  Returns a heap buffer in Result.pData (ownership passes to the caller,
  normally recycled through Buffer(1,...)), or pData=nil on end of
  stream/decoder failure. Also updates lastTimeStamp from the frame. }
function TAvPlayerState.ReceiveVideo:TMemChunk;
var
 err  :Integer;
 frame:PAVFrame;
 i    :Integer;
 p    :PByte;
begin
 Result:=Default(TMemChunk);
 if (videoStreamId<0) or (not IsPlaying) then Exit;
 frame:=av_frame_alloc;
 Result.pData:=nil;
 while True do
 begin
  err:=avcodec_receive_frame(videoCodecContext,frame);
  if (err=AVERROR_EAGAIN) and (NextPacket(videoStreamId)) then
   continue;
  if err<>0 then
  begin
   // BUGFIX: bail out here like ReceiveAudio does; the old code only cleared
   // `source` and then fell through to read pixel data from an empty frame.
   source:='';
   break;
  end;
  //
  lastTimeStamp:=frame^.best_effort_timestamp;
  Result.fSize:=videoCodecContext^.width*videoCodecContext^.height*SizeOf(DWord);
  GetMem(Result.pData,Result.fSize);
  p:=Result.pData;
  //Luma plane, row by row (linesize may be wider than the visible width).
  for i:=0 to frame^.height-1 do
  begin
   Move(frame^.data[0][frame^.linesize[0]*i],p[0],frame^.width);
   p:=p+frame^.width;
  end;
  //Chroma planes at half resolution - assumes 4:2:0 subsampling (TODO confirm).
  for i:=0 to frame^.height div 2-1 do
  begin
   Move(frame^.data[1][frame^.linesize[1]*i],p[0],frame^.width div 2);
   p:=p+frame^.width div 2;
  end;
  for i:=0 to frame^.height div 2-1 do
  begin
   Move(frame^.data[2][frame^.linesize[2]*i],p[0],frame^.width div 2);
   p:=p+frame^.width div 2;
  end;
  break;
 end;
 av_frame_free(frame);
end;
{ Average duration of one video frame in microseconds (den/num seconds).
  Falls back to 30 fps when the container reports no usable rate -
  the old code divided by zero on num=0 (unknown frame rate). }
function TAvPlayerState.GetFramerate:QWord;
var
 rational:AVRational;
begin
 rational:=formatContext^.streams[videoStreamId]^.avg_frame_rate;
 if (rational.num<=0) or (rational.den<=0) then
  Exit(Round(1000000/30)); //unknown/invalid rate -> assume 30 fps
 Result:=Round(rational.den/rational.num * 1000000);
end;
//A player counts as "playing" while a media source is still attached.
function TAvPlayerState.IsPlaying:Boolean;
begin
 IsPlaying:=(source<>'');
end;
{ Hand a freshly decoded chunk to the per-stream output buffer and return
  the pointer handed to the guest.
  aType=0  -> audio: takes ownership of chunk.pData (heap memory), freeing
              the previous buffer first.
  aType<>0 -> video: copies into guest texture memory obtained from the
              allocator callbacks (allocated once, on first use) and frees
              the temporary chunk.
  chunk.pData=nil just returns the current buffer unchanged.
  NOTE(review): only slot 0 of the BUFFER_COUNT-sized ring is ever used. }
function TAvPlayerState.Buffer(const aType:DWord;const chunk:TMemChunk):Pointer;
var
 playerInfo:PAvPlayerInfo;
begin
 playerInfo:=info;
 if aType=0 then
 begin
  if (chunk.pData<>nil) then
  begin
   if (audioBuffer[0]<>nil) then
   begin
    FreeMem(audioBuffer[0]);
   end;
   audioBuffer[0]:=chunk.pData;
  end;
  Exit(audioBuffer[0]);
 end else
 begin
  if (chunk.pData<>nil) then
  begin
   if videoBuffer[0]=nil then
   begin
    //First frame fixes the texture size; later chunks are assumed equal size.
    videoBuffer[0]:=playerInfo^.memoryReplacement.allocateTexture(playerInfo^.memoryReplacement.objectPointer,0,chunk.fSize);
   end;
   Move(chunk.pData^,videoBuffer[0]^,chunk.fSize);
   FreeMem(chunk.pData);
  end;
  Exit(videoBuffer[0]);
 end;
end;
//True when the guest supplied the complete set of allocator callbacks.
function _test_mem_alloc(var m:SceAvPlayerMemAllocator):Boolean; inline;
begin
 Result:=(m.allocate         <>nil) and
         (m.deallocate       <>nil) and
         (m.allocateTexture  <>nil) and
         (m.deallocateTexture<>nil);
end;
{ Create a player instance from SceAvPlayerInitData.
  All four memory-allocator callbacks are mandatory; returns nil if any is
  missing or pInit is nil. }
function _sceAvPlayerInit(pInit:PSceAvPlayerInitData):SceAvPlayerHandle;
begin
 Result:=nil;
 if (pInit=nil) then Exit;
 if not _test_mem_alloc(pInit^.memoryReplacement) then
 begin
  Writeln(SysLogPrefix,'All allocators are required for AVPlayer Initialisation.');
  Exit;
 end;
 Writeln(SysLogPrefix,'sceAvPlayerInit');
 New(Result);
 // BUGFIX: New() only initializes managed fields; isLooped/isPaused and
 // lastFrameTime were left as garbage. Zero the whole record first.
 Result^:=Default(TAvPlayerInfo);
 Result^.playerState:=TAvPlayerState.Create;
 Result^.playerState.info :=Result;
 Result^.memoryReplacement:=pInit^.memoryReplacement;
 Result^.eventReplacement :=pInit^.eventReplacement;
 Result^.fileReplacement  :=pInit^.fileReplacement;
 Result^.lastFrameTime    :=GetTimeInUs;
end;
//Guest-visible export: forwards to _sceAvPlayerInit under the signal lock.
function ps4_sceAvPlayerInit(pInit:PSceAvPlayerInitData):SceAvPlayerHandle; SysV_ABI_CDecl;
begin
 _sig_lock;
 Result:=_sceAvPlayerInit(pInit);
 _sig_unlock;
end;
{ Create a player instance from the extended init structure; the extra
  priority/affinity fields are currently ignored.
  All four memory-allocator callbacks are mandatory; returns nil if any is
  missing or pInit is nil. }
function _sceAvPlayerInitEx(pInit:PSceAvPlayerInitDataEx):SceAvPlayerHandle;
begin
 Result:=nil;
 if (pInit=nil) then Exit;
 if not _test_mem_alloc(pInit^.memoryReplacement) then
 begin
  Writeln(SysLogPrefix,'All allocators are required for AVPlayer Initialisation.');
  Exit;
 end;
 Writeln(SysLogPrefix,'sceAvPlayerInitEx');
 New(Result);
 // BUGFIX: New() only initializes managed fields; isLooped/isPaused and
 // lastFrameTime were left as garbage. Zero the whole record first.
 Result^:=Default(TAvPlayerInfo);
 Result^.playerState:=TAvPlayerState.Create;
 Result^.playerState.info :=Result;
 Result^.memoryReplacement:=pInit^.memoryReplacement;
 Result^.eventReplacement :=pInit^.eventReplacement;
 Result^.fileReplacement  :=pInit^.fileReplacement;
 Result^.lastFrameTime    :=GetTimeInUs;
end;
//Guest-visible export: forwards to _sceAvPlayerInitEx under the signal lock.
function ps4_sceAvPlayerInitEx(pInit:PSceAvPlayerInitDataEx):SceAvPlayerHandle; SysV_ABI_CDecl;
begin
 _sig_lock;
 Result:=_sceAvPlayerInitEx(pInit);
 _sig_unlock;
end;
//Stub: validates the arguments and reports success; the post-init
//parameters themselves are not used by this HLE implementation.
function ps4_sceAvPlayerPostInit(handle:SceAvPlayerHandle;pPostInit:PSceAvPlayerPostInitData):Integer; SysV_ABI_CDecl;
begin
 if (handle=nil) or (pPostInit=nil) then Exit(-1);
 Writeln(SysLogPrefix,'sceAvPlayerPostInit');
 Result:=0;
end;
{ Attach a media source to the player.
  When the guest supplied all four file callbacks, the file is read through
  them and dumped into DIRECTORY_AVPLAYER_DUMP so libavformat can open it
  from the host filesystem; otherwise argFilename is resolved through the
  VFS and must map to a real file.
  Returns 0 on success, -1 on any failure. }
function _sceAvPlayerAddSource(handle:SceAvPlayerHandle;argFilename:PChar):Integer;
const
 BUF_SIZE=512*1024;
var
 fileSize,
 bytesRemaining,
 offset        :QWord;
 bytesRead     :Integer;
 actualBufSize :QWord;
 buf           :array[0..BUF_SIZE-1] of Byte;
 p             :Pointer;
 f             :THandle;
 source        :RawByteString;
begin
 Writeln(SysLogPrefix,'sceAvPlayerAddSource:',argFilename);
 if (handle=nil) then Exit(-1); // BUGFIX: the callback-less branch dereferenced a nil handle
 spin_lock(lock);
 // With file functions provided by client
 if (handle^.fileReplacement.open<>nil) and (handle^.fileReplacement.close<>nil)
    and (handle^.fileReplacement.readOffset<>nil) and (handle^.fileReplacement.size<>nil) then
 begin
  p:=handle^.fileReplacement.objectPointer;
  if handle^.fileReplacement.open(p,argFilename)<0 then
  begin
   spin_unlock(lock);
   Exit(-1);
  end;
  fileSize:=handle^.fileReplacement.size(p);
  if (fileSize=0) then //result is uint64
  begin
   handle^.fileReplacement.close(p); // BUGFIX: the opened guest file was leaked here
   spin_unlock(lock);
   Exit(-1);
  end;
  // Read data and write to dump directory
  CreateDir(DIRECTORY_AVPLAYER_DUMP);
  //
  source:=DIRECTORY_AVPLAYER_DUMP+'/'+ExtractFileName(argFilename);
  f:=FileCreate(source,fmOpenWrite);
  if (f=THandle(-1)) then // BUGFIX: dump-file creation used to go unchecked
  begin
   handle^.fileReplacement.close(p);
   spin_unlock(lock);
   Exit(-1);
  end;
  bytesRemaining:=fileSize;
  offset:=0;
  while bytesRemaining>0 do
  begin
   actualBufSize:=Min(QWORD(BUF_SIZE),bytesRemaining);
   bytesRead:=handle^.fileReplacement.readOffset(p,@buf[0],offset,actualBufSize);
   if bytesRead<=0 then // BUGFIX: bytesRead=0 used to spin forever
   begin
    FileClose(f);
    handle^.fileReplacement.close(p);
    spin_unlock(lock);
    Exit(-1);
   end;
   // BUGFIX: write/advance by the amount actually read, not the request size
   FileWrite(f,buf,bytesRead);
   Dec(bytesRemaining,bytesRead);
   Inc(offset,bytesRead);
  end;
  FileClose(f);
  handle^.fileReplacement.close(p);
  // Init player
  handle^.playerState.CreateMedia(source);
  Result:=0;
 end else
 // Without client-side file functions
 begin
  source:='';
  Result:=parse_filename(argFilename,source); //resolve guest path -> host path
  if (Result=PT_FILE) then //only real files
  begin
   handle^.playerState.CreateMedia(source);
   Result:=0;
  end else
  begin
   Result:=-1;
  end;
 end;
 spin_unlock(lock);
end;
//Guest-visible export: forwards to _sceAvPlayerAddSource under the signal lock.
function ps4_sceAvPlayerAddSource(handle:SceAvPlayerHandle;argFilename:PChar):Integer; SysV_ABI_CDecl;
begin
 _sig_lock;
 Result:=_sceAvPlayerAddSource(handle,argFilename);
 _sig_unlock;
end;
//True while the instance exists and still has media attached.
function ps4_sceAvPlayerIsActive(handle:SceAvPlayerHandle): Boolean; SysV_ABI_CDecl;
begin
 //Writeln(SysLogPrefix,'sceAvPlayerIsActive');
 Result:=(handle<>nil) and handle^.playerState.IsPlaying;
end;
//Enable/disable looped playback. Always reports success, but now guards
//against a nil handle (the old code dereferenced it unconditionally).
function ps4_sceAvPlayerSetLooping(handle:SceAvPlayerHandle;loopFlag:Boolean):DWord; SysV_ABI_CDecl;
begin
 Writeln(SysLogPrefix,'sceAvPlayerSetLooping');
 Result:=0;
 if (handle<>nil) then
  handle^.isLooped:=loopFlag;
end;
{ Deliver one decoded audio frame to the guest.
  Fills frameInfo with interleaved S16 PCM plus channel/sample-rate details;
  the buffer handed out stays owned by the player (recycled via Buffer(0,..)).
  Returns True when a frame was delivered, False on pause/end of stream. }
function _sceAvPlayerGetAudioData(handle:SceAvPlayerHandle;frameInfo:PSceAvPlayerFrameInfo):Boolean;
var
 audioData:TMemChunk;
begin
 //Writeln(SysLogPrefix,'sceAvPlayerGetAudioData');
 Result:=False;
 if (frameInfo<>nil) and (handle<>nil) and (handle^.playerState.IsPlaying) and (not handle^.isPaused) then
 begin
  audioData:=Default(TMemChunk);
  spin_lock(lock);
  audioData:=handle^.playerState.ReceiveAudio;
  if (audioData.pData=nil) then
  begin
   spin_unlock(lock);
   Exit(False);
  end;
  //NOTE(review): lastTimeStamp is only written by the video path
  //(ReceiveVideo), so the audio timestamp mirrors the last video frame
  //rather than the audio stream itself - verify against real hardware.
  frameInfo^.timeStamp:=_usec2msec(handle^.playerState.lastTimeStamp);
  frameInfo^.details.audio.channelCount:=handle^.playerState.channelCount;
  frameInfo^.details.audio.sampleRate:=handle^.playerState.sampleRate;
  frameInfo^.details.audio.size:=handle^.playerState.channelCount*handle^.playerState.sampleCount*SizeOf(SmallInt);
  frameInfo^.pData:=handle^.playerState.Buffer(0,audioData);
  spin_unlock(lock);
  Result:=True;
 end;
end;
//Guest-visible export: forwards to _sceAvPlayerGetAudioData under the signal lock.
function ps4_sceAvPlayerGetAudioData(handle:SceAvPlayerHandle;frameInfo:PSceAvPlayerFrameInfo):Boolean; SysV_ABI_CDecl;
begin
 _sig_lock;
 Result:=_sceAvPlayerGetAudioData(handle,frameInfo);
 _sig_unlock;
end;
{ Deliver one decoded video frame to the guest, paced by the stream's
  average frame duration: a new frame is only decoded when at least one
  frame interval has elapsed since the previous delivery.
  The texture memory handed out is owned by the player (Buffer(1,..)).
  Returns True when a (new) frame was delivered. }
function _sceAvPlayerGetVideoDataEx(handle:SceAvPlayerHandle;frameInfo:PSceAvPlayerFrameInfoEx):Boolean;
var
 videoData:TMemChunk;
begin
 //Writeln(SysLogPrefix,'sceAvPlayerGetVideoDataEx');
 Result:=False;
 if (frameInfo<>nil) and (handle<>nil) and (handle^.playerState.IsPlaying) then
 begin
  videoData:=Default(TMemChunk);
  spin_lock(lock);
  //Frame pacing: GetFramerate returns the frame interval in microseconds.
  if handle^.lastFrameTime+handle^.playerState.GetFramerate<GetTimeInUs then
  begin
   handle^.lastFrameTime:=GetTimeInUs;
   videoData:=handle^.playerState.ReceiveVideo;
  end;
  if (videoData.pData=nil) then
  begin
   spin_unlock(lock);
   Exit(False); //either paced out or end of stream
  end;
  //NOTE(review): lastTimeStamp holds best_effort_timestamp in stream
  //time_base units, but _usec2msec treats it as microseconds - verify.
  frameInfo^.timeStamp:=_usec2msec(handle^.playerState.lastTimeStamp);
  frameInfo^.details.video.width:=handle^.playerState.videoCodecContext^.width;
  frameInfo^.details.video.height:=handle^.playerState.videoCodecContext^.height;
  frameInfo^.details.video.aspectRatio:=handle^.playerState.videoCodecContext^.width/handle^.playerState.videoCodecContext^.height;
  frameInfo^.details.video.framerate:=0;
  frameInfo^.details.video.languageCode:=LANGUAGE_CODE_ENG;
  frameInfo^.pData:=handle^.playerState.Buffer(1,videoData);
  spin_unlock(lock);
  Result:=True;
 end;
end;
//Guest-visible export: forwards to _sceAvPlayerGetVideoDataEx under the signal lock.
function ps4_sceAvPlayerGetVideoDataEx(handle:SceAvPlayerHandle;frameInfo:PSceAvPlayerFrameInfoEx):Boolean; SysV_ABI_CDecl;
begin
 _sig_lock;
 Result:=_sceAvPlayerGetVideoDataEx(handle,frameInfo);
 _sig_unlock;
end;
//Non-Ex variant: not implemented - always reports "no frame available".
function ps4_sceAvPlayerGetVideoData(handle:SceAvPlayerHandle;frameInfo:PSceAvPlayerFrameInfo):Boolean; SysV_ABI_CDecl;
begin
 Writeln(SysLogPrefix,'sceAvPlayerGetVideoData');
 // TODO: Rely on ps4_sceAvPlayerGetVideoDataEx to get the frame
 Result:=False;
end;
{ Stop playback: releases the ffmpeg state and output buffers but keeps the
  player instance itself alive for reuse. Returns 0, or -1 on a nil handle. }
function _sceAvPlayerStop(handle:SceAvPlayerHandle):Integer;
begin
 if (handle=nil) then Exit(-1);
 Writeln(SysLogPrefix,'sceAvPlayerStop');
 handle^.playerState.FreeMedia;
 Result:=0;
end;
//Guest-visible export: forwards to _sceAvPlayerStop under the signal lock.
function ps4_sceAvPlayerStop(handle:SceAvPlayerHandle):Integer; SysV_ABI_CDecl;
begin
 _sig_lock;
 Result:=_sceAvPlayerStop(handle);
 _sig_unlock;
end;
{ Destroy a player instance created by _sceAvPlayerInit/_sceAvPlayerInitEx:
  frees the playback state (which releases any open media) and the record
  itself. Returns 0, or -1 on a nil handle. }
function _sceAvPlayerClose(handle:SceAvPlayerHandle):Integer;
begin
 if (handle=nil) then Exit(-1);
 if (handle^.playerState<>nil) then
  handle^.playerState.Free;
 Dispose(handle);
 Result:=0;
end;
//Guest-visible export: forwards to _sceAvPlayerClose under the signal lock.
function ps4_sceAvPlayerClose(handle:SceAvPlayerHandle):Integer; SysV_ABI_CDecl;
begin
 _sig_lock;
 Result:=_sceAvPlayerClose(handle);
 _sig_unlock;
end;
{ Build the ELF node that exports the libSceAvPlayer HLE functions.
  The 64-bit keys are the nid hashes of the corresponding sce* symbol
  names; they must match the guest module exactly - do not edit. }
function Load_libSceAvPlayer(Const name:RawByteString):TElf_node;
var
 lib:PLIBRARY;
begin
 Result:=TElf_node.Create;
 Result.pFileName:=name;
 lib:=Result._add_lib('libSceAvPlayer');
 lib^.set_proc($692EBA448D201A0A,@ps4_sceAvPlayerInit);
 lib^.set_proc($A3D79646448BF8CE,@ps4_sceAvPlayerInitEx);
 lib^.set_proc($1C3D58295536EBF3,@ps4_sceAvPlayerPostInit);
 lib^.set_proc($28C7046BEAC7B08A,@ps4_sceAvPlayerAddSource);
 lib^.set_proc($51B42861AC0EB1F6,@ps4_sceAvPlayerIsActive);
 lib^.set_proc($395B61B34C467E1A,@ps4_sceAvPlayerSetLooping);
 lib^.set_proc($5A7A7539572B6609,@ps4_sceAvPlayerGetAudioData);
 lib^.set_proc($25D92C42EF2935D4,@ps4_sceAvPlayerGetVideoDataEx);
 lib^.set_proc($642D7BC37BC1E4BA,@ps4_sceAvPlayerStop);
 lib^.set_proc($3642700F32A6225C,@ps4_sceAvPlayerClose);
end;
initialization
ps4_app.RegistredPreLoad('libSceAvPlayer.prx',@Load_libSceAvPlayer);
end.

View File

@ -34,6 +34,7 @@ function SwGetThreadTime(var ut:QWORD):Boolean;
procedure SwGetSystemTimeAsFileTime(var lpSystemTimeAsFileTime:TFILETIME);
procedure Swgettimezone(z:Ptimezone);
function Swgetntptimeofday(tp:Ptimespec;z:Ptimezone):Integer;
function SwGetTimeUsec:QWORD;
Const
FILETIME_1970 =116444736000000000;
@ -279,6 +280,25 @@ begin
Result:=0;
end;
{ Monotonic time in microseconds derived from the performance counter
  (pc ticks at pf Hz). pf defaults to 1 so a failed query cannot divide
  by zero. }
function SwGetTimeUsec:QWORD;
var
 pc,pf:QWORD;
begin
 pc:=0;
 pf:=1;
 _sig_lock;
 NtQueryPerformanceCounter(@pc,@pf);
 _sig_unlock;
 // BUGFIX: the previous per-DWORD split computed
 //   (lo*1e6 div pf) + ((hi*1e6 div pf) shl 32)
 // which discards the fractional part of the high half BEFORE the 32-bit
 // shift - wrong by up to ~2^32/pf seconds whenever pf does not divide
 // hi*1000000 (e.g. the common 10 MHz counter). Splitting on pf instead is
 // exact for the whole-second part and overflow-safe for the remainder
 // ((pc mod pf)*1000000 < 2^64 for any realistic counter frequency).
 Result:=(pc div pf)*1000000+((pc mod pf)*1000000) div pf;
end;
end.

View File

@ -263,7 +263,7 @@ begin
Assert(t<>nil,'create sparse buffer fail');
end;
else
Assert(false,'_is_sparce');
Assert(false,'Is not GPU Addr:'+HexStr(Addr));
end;
t.FAddr:=addr; //save key