你的分享就是我们的动力 ---﹥

一个基于SDL和ffmpeg的播放器,也可以作为android的视频播放库

时间:2013-05-29 17:57来源:www.chengxuyuans.com 点击:

代码简介

编解码基于ffmpeg 
显示采用的是SDL。 
视频同步采用的是视频同步到音频,
所以如果没有音频的话 视频会播放的很快,囧。。
主要参考了这篇文章:
《如何用 FFmpeg 编写一个简单播放器详细步骤介绍》

在此基础上做了改造与封装
myblog: 
http://blog.sina.com.cn/u/1214839174

吐槽一下 为什么不认识makefile啊。。。 只能改成makefile.sh了。。。

代码片段

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#include "ff_queue.h"
#include "ff_play.h"

#ifdef ANDROID_LOG
#include <android/log.h>
#define LOGI(format,...)  //__android_log_print(ANDROID_LOG_INFO ,"hello_hl","file[%s] line[%d] "format"",__FILE__, __LINE__ ,##__VA_ARGS__)
#define LOGE(format,...)  __android_log_print(ANDROID_LOG_ERROR,"hello_hl","file[%s] line[%d] "format"",__FILE__, __LINE__ ,##__VA_ARGS__)
#else
#define LOGI(format,...)  // printf("file[%s] line[%d] "format"\n",__FILE__, __LINE__ ,##__VA_ARGS__)
#define LOGE(format,...)  printf("file[%s] line[%d] "format"\n",__FILE__, __LINE__ ,##__VA_ARGS__)
#endif

/*上下文*/
AVFormatContext *FormatCtx = NULL;
AVCodecContext *VideoCodecCtx=NULL;
AVCodecContext *AudioCodecCtx=NULL;
AVCodec *Codec=NULL;
/*帧*/
AVFrame *FrameVideo=NULL;
AVFrame *FrameVideoRGB=NULL;
AVFrame *FrameAudio=NULL;
/*视频像素格式*/
enum PixelFormat FrameRGB_Pix_Fmt;
/*视频流*/
int StreamVideo = -1;
int StreamAudio = -1;
/*音频测试缓冲区和文件*/
FILE* File_Audio_Record=NULL;
/*音视频队列*/
PacketQueue Queue_Audio ={0};
PacketQueue Queue_Video ={0};
/*线程定义*/
pthread_t Task_Read =0;
int Isrun_Read = 0; /*读线程状态*/
/*时钟定义*/
double Clock_Audio;
double Clock_Video;
unsigned char  Delay_Muti=0;  /*视频延时倍率*/
#define DELAY_BASE 50	/*视频延时基准,单位豪秒*/
#define INTERVAL_MAX 0.5	/*音视频 最大时钟差,单位秒*/
#define INTERVAL_ALERT 0.1 /*视频过快超过此时间,则开始增加延迟倍率*/
int Seek_Time=0;   /*播放的前进后退时间,单位秒*/

/*缓冲清刷标志位*/
int Flush_Video=0;
int Flush_Audio=0;

//#define  ONLY_I_FRAME			/*是否只取I帧*/


/*save frame to ppm   path 是形如 /mnt/sdcard/这种形式  */
/* Dump one RGB24 frame as a binary PPM file named "<path>frameNNNNN.ppm".
 * `path` must end with a separator, e.g. "/mnt/sdcard/".
 * Silently returns if the file cannot be opened. */
void ff_play_SaveFrame(AVFrame *FrameVideo, char*  path, int width, int height, int iFrame) {
	FILE *pFile;
	char szFilename[256];
	int  y;
	/* snprintf bounds the write: the old sprintf could overflow the
	 * filename buffer for long paths */
	snprintf(szFilename, sizeof(szFilename), "%sframe%05d.ppm", path, iFrame);
	pFile = fopen(szFilename, "wb");
	if (pFile == NULL)
		return;
	/* PPM "P6" header: width, height, max component value */
	fprintf(pFile, "P6\n%d %d\n255\n", width, height);
	/* Rows are copied one by one because linesize may exceed width*3 (padding) */
	for (y = 0; y < height; y++)
		fwrite(FrameVideo->data[0] + y * FrameVideo->linesize[0], 1, width * 3, pFile);
	fclose(pFile);
}

/*save sound to pcm , path 是形如 /mnt/sdcard/这种形式 */
/* Append one decoded audio frame's raw PCM to "<path>audio.pcm".
 * `path` must end with a separator, e.g. "/mnt/sdcard/".
 * The file handle is opened lazily on first call and kept in the global
 * File_Audio_Record (closed by ff_play_FreeAll). */
void ff_play_SaveAudio(AVFrame *FrameVideo, char*  path, int bufsize) {
	char szFilename[256];
	/* Lazy open on first use */
	if (File_Audio_Record == NULL) {
		/* snprintf bounds the write: the old sprintf could overflow for long paths */
		snprintf(szFilename, sizeof(szFilename), "%saudio.pcm", path);
		File_Audio_Record = fopen(szFilename, "wb");
		if (File_Audio_Record == NULL)
			return;
	}
	/* Append the PCM payload and flush so the dump survives a crash */
	fwrite(FrameVideo->data[0], 1, bufsize, File_Audio_Record);
	fflush(File_Audio_Record);
}

/* Reset both packet queues to an empty, usable state. */
void ff_play_init_queue(void){
	/* The two queues are independent; initialization order is irrelevant. */
	ff_queue_init(&Queue_Video);
	ff_queue_init(&Queue_Audio);
}

/* Allocate the three reusable decode frames (raw video, converted RGB,
 * audio).  Failures are logged; the pointers stay NULL so later NULL
 * checks in ff_play_FreeAll still behave. */
void ff_play_allocFrame(){
	FrameVideo = avcodec_alloc_frame();
	FrameVideoRGB = avcodec_alloc_frame();
	FrameAudio = avcodec_alloc_frame();
	/* The old code ignored allocation failure entirely */
	if (FrameVideo == NULL || FrameVideoRGB == NULL || FrameAudio == NULL) {
		LOGE("frame allocation failed video[%p] rgb[%p] audio[%p]",
				(void *)FrameVideo, (void *)FrameVideoRGB, (void *)FrameAudio);
	}
}

/*free all resource*/
/* Stop the reader thread and release every global resource, then reset all
 * globals so a new stream can be opened.  Returns 0. */
int ff_play_FreeAll(void){
	/* Only wait for the reader when it was actually started: the old
	 * unconditional `while(Isrun_Read >= 0);` spun forever if the thread
	 * never ran (Isrun_Read stuck at 0). */
	if (Isrun_Read > 0) {
		Isrun_Read = 0;
		while (Isrun_Read != -1)      /* thread sets -1 on exit */
			usleep(1000);             /* yield instead of burning a core */
	}
	LOGE("free all 111111  !!!");
	if (FrameVideo != NULL)
		av_free(FrameVideo);
	if (FrameVideoRGB != NULL)
		av_free(FrameVideoRGB);
	if (FrameAudio != NULL)
		av_free(FrameAudio);
	if (VideoCodecCtx != NULL)
		avcodec_close(VideoCodecCtx);
	if (AudioCodecCtx != NULL)
		avcodec_close(AudioCodecCtx);
	if (FormatCtx != NULL)
		avformat_close_input(&FormatCtx);   /* also NULLs FormatCtx */
	if (File_Audio_Record != NULL)
		fclose(File_Audio_Record);
	/* Reset globals one type at a time; the old single chained assignment
	 * mixed incompatible pointer types and forgot FrameAudio, leaving a
	 * dangling pointer that a second FreeAll would double-free. */
	FrameVideo = FrameVideoRGB = FrameAudio = NULL;
	VideoCodecCtx = AudioCodecCtx = NULL;
	FormatCtx = NULL;
	File_Audio_Record = NULL;
	Flush_Video = Flush_Audio = Seek_Time = 0;
	Clock_Video = Clock_Audio = 0;
	Delay_Muti = 0;
	StreamVideo = StreamAudio = -1;
	Isrun_Read = 0;   /* back to "not running" so a restart is possible */
	LOGE("free all 22222  !!!");
	return 0;
}

/*注册函数*/
void ff_play_register_all(){
	av_register_all();
	avcodec_register_all();
	avformat_network_init();
}


/*打开一个文件或者流 并初始化上下文*/
int ff_play_open_and_initctx(const char *pathStr){
	int res = 1, i;
	int numBytes;
	/*open stream or file*/
	res = avformat_open_input(&FormatCtx, pathStr, NULL, NULL);
	if (res < 0) {
		return res;
	}
	/*find stream*/
	res = avformat_find_stream_info(FormatCtx, NULL);
	if (res < 0) {
		return res;
	}
	/* find the first video and audio stream */
	for (i=0; i < FormatCtx->nb_streams; i++) {
		if (FormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			StreamVideo = i;
		}
		if (FormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
			StreamAudio = i;
		}
		if(StreamVideo >=0 && StreamAudio >=0)
			break;
	}
	/*find the decoder for the video stream*/
	if(StreamVideo >=0 ){
		VideoCodecCtx = FormatCtx->streams[StreamVideo]->codec;
		Codec = avcodec_find_decoder(VideoCodecCtx->codec_id);
		if (Codec == NULL) {
			return AVERROR(EFF_PLAY_FIND_CODEC);
		} /*supprt TRUNCATE? */
		if (Codec->capabilities & CODEC_CAP_TRUNCATED) {
			VideoCodecCtx->flags |= CODEC_FLAG_TRUNCATED;
		}
		if(avcodec_open2(VideoCodecCtx, Codec, NULL) <0){
			return AVERROR(EFF_PLAY_OPEN_CODEC);
		}
		LOGE("have video stream[%d]",StreamVideo);
	}
	/*find the decoder for the audio stream*/
	if(StreamAudio >=0 ){
		AudioCodecCtx = FormatCtx->streams[StreamAudio]->codec;
		Codec = avcodec_find_decoder(AudioCodecCtx->codec_id);
		if (Codec == NULL) {
			return AVERROR(EFF_PLAY_FIND_CODEC);
		} /*supprt TRUNCATE? */
		if (Codec->capabilities & CODEC_CAP_TRUNCATED) {
			AudioCodecCtx->flags |= CODEC_FLAG_TRUNCATED;
		}
		if(avcodec_open2(AudioCodecCtx, Codec, NULL) <0){
			return AVERROR(EFF_PLAY_OPEN_CODEC);
		}
		LOGE("have audio stream[%d]",StreamAudio);
	}
	ff_play_init_queue();
	ff_play_allocFrame();
	return 0;
}

/* Bind FrameVideoRGB's data planes to the caller-supplied pixel buffer, so
 * sws_scale writes straight into it.  No-op if already bound to `pixels`.
 * Returns 0 on success, a negative AVERROR on failure. */
int  ff_play_picture_fill(void* pixels,int width,int height,enum PixelFormat pix_fmt){
	int res;
	if (FrameVideoRGB->data[0] != pixels) {
		res = avpicture_fill((AVPicture *) FrameVideoRGB, pixels, pix_fmt,
				width, height);
		/* BUG FIX: the old code stored `avpicture_fill(...) < 0` (0 or 1)
		 * and returned it, so failures came back as +1 and callers that
		 * test `ret < 0` never noticed them. */
		if (res < 0)
			return res;
		FrameRGB_Pix_Fmt = pix_fmt;
	}
	return 0;
}

/* Video height in pixels, or 0 when no video stream is open. */
int ff_play_getvideo_height(void){
	if (VideoCodecCtx == NULL)
		return 0;
	return VideoCodecCtx->height;
}
/* Video width in pixels, or 0 when no video stream is open. */
int ff_play_getvideo_width(void){
	if (VideoCodecCtx == NULL)
		return 0;
	return VideoCodecCtx->width;
}
/* Audio sample rate in Hz, or 0 when no audio stream is open. */
int ff_play_getaudio_samplerate(void){
	if (AudioCodecCtx == NULL)
		return 0;
	return AudioCodecCtx->sample_rate;
}

/* Audio channel count, or 0 when no audio stream is open. */
int ff_play_getaudio_channels(void){
	if (AudioCodecCtx == NULL)
		return 0;
	return AudioCodecCtx->channels;
}

/*从音频队列上取一个包 写入audiobuf, 这里的audiobuf 可以是音频设备的缓冲也可是单纯的buf*/
int ff_play_getaudiopkt2play(void *audio_buf, int max_len){
	int ret;
	int len_get = 0,len_left=0;
	int data_size=0;
	uint8_t *pbuf;
	int pkt_size;
	int handle_finished=0,frameFinished = 0;
	AVPacket packet;
	uint8_t *FR_pkt_data_org; /*用于保存packet.data指向的原地址*/

	av_init_packet(&packet);
	pbuf = (uint8_t*)audio_buf;

	do{/*清刷缓冲区*/
		if(Flush_Audio == 1){
			avcodec_flush_buffers(AudioCodecCtx);
			Flush_Audio = 0;
			LOGI("FLUSH OK!!!");
		}
		LOGI("11111!!!");

		ret = ff_queue_packet_get(&Queue_Audio,&packet,0);
		if(ret<=0){
			break;
		}
		LOGI("22222 stream_idx[%d], stream_audio[%d]!!!",packet.stream_index ,StreamAudio );
		if (packet.stream_index != StreamAudio){
			av_free_packet(&packet);
			continue;
		}
		LOGI("33333!!!");

		FR_pkt_data_org = packet.data; /*保留packet指向的缓冲区初始地址,这个地址如果不对,将来释放的时候会出错*/
		len_left = (max_len > 0 ? max_len : 1000000 );
		while(packet.size>0 && len_left>0) { /*音频解码的时候可能一次不能把pkt全解完,要多次*/
			/*解码, 这里的len get是packet的偏移 而不是解码出来的长度, 切不要弄错!!*/
			len_get = avcodec_decode_audio4(AudioCodecCtx,FrameAudio,&frameFinished,&packet);
			if (len_get<0){
				break;
			}
			if(frameFinished){  /*获取了一帧 */
				/*data_size 才是解码的长度*/
				data_size = av_samples_get_buffer_size(FrameAudio->linesize, AudioCodecCtx->channels,
						FrameAudio->nb_samples,AudioCodecCtx->sample_fmt, 0);
				data_size = ((data_size > len_left && len_left >0 )?len_left:data_size);
				memmove((void *)pbuf,(void *)FrameAudio->data[0],data_size);
			}/*注意,指向缓冲区的指针后移了,释放的时候要把它重置*/
			packet.data += len_get;
			packet.size -= len_get;
			pbuf += data_size;  /*pbuf 向后移动,向输出缓冲填入新的声音*/
			len_left -= data_size; /*缓冲区剩余空间减少*/
			LOGI("55555 decode len_get[%d],len_left[%d] data_size[%d] frameFinished[%d]!!!", \
					len_get, len_left,data_size,frameFinished);
		}
		/*获取音频时间基准保存到 Clock_Audio*/
		if(packet.pts != AV_NOPTS_VALUE){
			Clock_Audio = av_q2d(FormatCtx->streams[StreamAudio]->time_base)*packet.pts;
		}
		handle_finished = 1;

		packet.data = FR_pkt_data_org; /*重置*/
		av_free_packet(&packet);

	}while(handle_finished <1);
	ret = pbuf - (uint8_t*)audio_buf;
//	LOGE("44444 ret[%d]!!!", ret);
	return  ret;
}


/*从Queue_Video里面取一个包显示*/
void ff_play_getvideopkt2display(void *arg){
	AVPacket packet;
	struct SwsContext *img_convert_ctx;
	int handle_finished,frameFinished = 0,ret;
	static int FS_discard_frame=0;  /*丢帧标志*/

	do{/*清刷缓冲区*/
		if(Flush_Video == 1){
			avcodec_flush_buffers(VideoCodecCtx);
			Flush_Video = 0;
		}
		ret = ff_queue_packet_get(&Queue_Video,&packet,0);
		if(ret<=0){
			break;
		}
		if(packet.stream_index != StreamVideo){
			av_free_packet(&packet);
			continue;
		}/*获取视频时间基准保存到 Clock_Video*/
		if(packet.pts != AV_NOPTS_VALUE){
			Clock_Video = av_q2d(FormatCtx->streams[StreamVideo]->time_base)*packet.pts;
		}
		LOGI("Discard flag[%d], PKT_FLAG[%d], clk_diff[%f] clk_video[%f],head[0x %x %x %x %x %x %x %x]", \
			FS_discard_frame, packet.flags & AV_PKT_FLAG_KEY, Clock_Video-Clock_Audio, Clock_Video, \
			packet.data[0],packet.data[1],packet.data[2],packet.data[3],packet.data[4],packet.data[5],packet.data[6]);
		/*视频太超前,增加延时, 差距越大,增加延迟越多*/
		if( (StreamAudio >=0 ) && Clock_Video - Clock_Audio > INTERVAL_ALERT){
			if(Clock_Video - Clock_Audio > INTERVAL_ALERT*(Delay_Muti+1) ){
				Delay_Muti++;
			}
			usleep( Delay_Muti * DELAY_BASE * 1000);
			LOGI("DELAY_MUTI[%d] diff[%f]", Delay_Muti, Clock_Video-Clock_Audio);
		}
		/*视频时间太落后,进入丢帧状态, 同时延时标志置为0*/
		if( ( StreamAudio >=0 ) &&  (Clock_Video < Clock_Audio - INTERVAL_MAX) && (packet.flags & AV_PKT_FLAG_KEY)){
			Delay_Muti = 0;
			FS_discard_frame =1;
		}

		/*丢帧状态中,判断是否要继续丢帧, 超过音频则不再丢*/
		if(FS_discard_frame){
			if( (StreamAudio >=0 ) &&  (Clock_Video >= Clock_Audio ) && (packet.flags & AV_PKT_FLAG_KEY)){
				FS_discard_frame =0;
			} /*丢掉关键帧之外的帧*/
			else if(!(packet.flags & AV_PKT_FLAG_KEY) ){
					av_free_packet(&packet);
					continue;
			}
		}
		avcodec_decode_video2(VideoCodecCtx, FrameVideo, &frameFinished, &packet);
		LOGI("PKT_FLAG[%d] , pic type is[%d]",packet.flags,FrameVideo->pict_type);
		if(frameFinished <=0){
			av_free_packet(&packet);
			continue;
		}
		/*测试只取关键帧*/
		/*	if(FrameVideo->key_frame != 1) */
		img_convert_ctx = sws_getContext(VideoCodecCtx->width,
				VideoCodecCtx->height, VideoCodecCtx->pix_fmt, VideoCodecCtx->width,
				VideoCodecCtx->height, FrameRGB_Pix_Fmt, SWS_BICUBIC, NULL, NULL,
				NULL);
		if (img_convert_ctx == NULL) { /*获取不到对应上下文直接返回*/
			av_free_packet(&packet);
			return;
		}
		sws_scale(img_convert_ctx,
				(const uint8_t* const *) FrameVideo->data, FrameVideo->linesize,
				0, VideoCodecCtx->height, FrameVideoRGB->data,
				FrameVideoRGB->linesize);
		handle_finished = 1;
		av_free_packet(&packet);
	}while(handle_finished <1);
}

/* Seek `seek_seconds` forward (positive) or backward (negative) from the
 * current clock of the preferred stream (video if present, else audio).
 * No-op when no stream is open. */
void ff_play_seek_to(int seek_seconds){
	double  clock = 0;
	int64_t seek_pos;
	int stream_index = -1;
	int seek_flags;

	/* Pick the reference clock */
	if (StreamVideo >= 0) {
		stream_index = StreamVideo;
		clock = Clock_Video;
	} else if (StreamAudio >= 0) {
		stream_index = StreamAudio;
		clock = Clock_Audio;
	}
	/* BUG FIX: bail out BEFORE using `clock`; the old code computed
	 * seek_pos from an uninitialized value when no stream was open. */
	if (stream_index < 0)
		return;

	seek_pos = (clock + seek_seconds) * AV_TIME_BASE;

	/* av_rescale_q(a,b,c) converts a timestamp between time bases
	 * (computes a*b/c without intermediate overflow). */
	seek_pos = av_rescale_q(seek_pos, AV_TIME_BASE_Q,
		FormatCtx->streams[stream_index]->time_base);

	seek_flags = seek_seconds < 0 ? AVSEEK_FLAG_BACKWARD : 0;
	LOGI("stream_index[%d] ,seek_pos[%lld] ,seek_flags[%d]",
			stream_index, seek_pos, seek_flags);
	if (av_seek_frame(FormatCtx, stream_index, seek_pos, seek_flags) < 0) {
		LOGI("error when seeking!!!!");
	}
}


/*从stream里面取包挂到queue上面*/
void* ff_play_readpkt_thread(void *arg) {
	AVPacket packet;
	int res=-1;
	double pkt_pts;
	int stream_index= -1;
	int64_t seek_target;

	while ( Isrun_Read >0 ) {
		/*视频前进后退 ,移动Format上下文之后清空队列和缓存*/
		if(Seek_Time != 0){
			ff_play_seek_to(Seek_Time);
			packet_queue_flush(&Queue_Video);
			packet_queue_flush(&Queue_Audio);
			/*设置清刷标志*/
			Flush_Video = 1;
			Flush_Audio = 1;
			LOGI("hahahaha  seektime [%d]!!!!", Seek_Time);
			Seek_Time = 0;
		}
		res = av_read_frame(FormatCtx, &packet);
		//LOGE("read frame res[%d] stream_idx[%d]",res ,packet.stream_index);
		if(res < 0) continue;
		// Is this a packet from the video stream?
		if (packet.stream_index == StreamVideo) {
			ff_queue_packet_put(&Queue_Video, &packet);
		} else if (packet.stream_index == StreamAudio) {
			ff_queue_packet_put(&Queue_Audio, &packet);
		} else {
			av_free_packet(&packet);
		}
	}
	packet_queue_flush(&Queue_Video);
	packet_queue_flush(&Queue_Audio);
	Isrun_Read = -1;
	return NULL;
}

/*启动读包线程*/
int ff_play_begin_read_thread(void){
	int res;
	res = pthread_create(&Task_Read, NULL, ff_play_readpkt_thread, NULL);
	Isrun_Read = 1;
	return res;
}

/*快进或 快退跳转*/
void ff_play_jump(int second){
	Seek_Time =  second;
}


代码片段

#ifndef __FF_PLAY_H__
#define __FF_PLAY_H__

#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavutil/error.h"

/* Error codes.  Parenthesized so expansions inside AVERROR(...) or other
 * arithmetic cannot mis-associate (the old `BASE + 1` form was unsafe). */
#define EFF_PLAY_BASE 1000
#define EFF_PLAY_FIND_CODEC    (EFF_PLAY_BASE + 1)
#define EFF_PLAY_OPEN_CODEC    (EFF_PLAY_BASE + 2)

/* One-time FFmpeg registration; call before anything else. */
void ff_play_register_all(void);

/* Free every resource and reset global state.  Returns 0. */
int ff_play_FreeAll(void);

/* Save one frame as PPM.  `path` ends with a separator, e.g. "/mnt/sdcard/". */
void ff_play_SaveFrame(AVFrame *pFrame, char*  path, int width, int height, int iFrame);

/* Append decoded PCM to "<path>audio.pcm".  `path` ends with a separator. */
void ff_play_SaveAudio(AVFrame *pFrame, char*  path, int bufsize);

/* Open a file or stream and initialize the decode contexts.
 * Returns 0 on success, a negative AVERROR on failure. */
int ff_play_open_and_initctx(const char *pathStr);

/* Decode one audio packet into audio_buf (at most max_len bytes;
 * max_len <= 0 means unbounded).  Returns bytes written. */
int ff_play_getaudiopkt2play(void *audio_buf, int max_len);

/* Decode, sync, and render one video packet into the bound pixel buffer. */
void ff_play_getvideopkt2display(void *arg);

/* Reader-thread entry: demux packets onto the queues. */
void* ff_play_readpkt_thread(void *arg);

/* Start the reader thread.  Returns pthread_create's result. */
int ff_play_begin_read_thread(void);

/* Bind `pixels` as FrameVideoRGB's backing store.
 * Returns 0 on success, a negative AVERROR on failure. */
int  ff_play_picture_fill(void* pixels,int width,int height,enum PixelFormat pix_fmt);

/* Stream property getters; each returns 0 when the stream is absent. */
int ff_play_getvideo_height(void);
int ff_play_getvideo_width(void);
int ff_play_getaudio_samplerate(void);
int ff_play_getaudio_channels(void);

/* Request a relative seek of `second` seconds (negative = backward). */
void ff_play_jump(int second);

#endif

代码片段

#include "ff_queue.h"
#include <stdio.h>
#include <SDL/SDL.h>
#ifndef SDL_MUTEX
#include <pthread.h>
#else
#include <SDL/SDL_thread.h>
#endif

/*队列初始化*/
void ff_queue_init(PacketQueue *q) {
	memset(q, 0, sizeof(PacketQueue));
}

/*设置退出标志*/
void ff_queue_set_quit(PacketQueue *q){
	q->quit_flag =1;
}

/*将一个packet放到队列*/
int ff_queue_packet_put(PacketQueue *q, AVPacket *pkt) {
	AVPacketList *pkt1;
	if (av_dup_packet(pkt) < 0) {
		return -1;
	}
	pkt1 = av_malloc(sizeof(AVPacketList));
	if (!pkt1)
		return -1;
	pkt1->pkt = *pkt;
	pkt1->next = NULL;
#ifndef SDL_MUTEX
	pthread_mutex_lock(&q->mutex);
#else
	SDL_LockMutex(q->mutex);
#endif
	if (!q->last_pkt)
		q->first_pkt = pkt1;
	else
		q->last_pkt->next = pkt1;
	q->last_pkt = pkt1;
	q->nb_packets++;
	q->size += pkt1->pkt.size;
#ifndef SDL_MUTEX
	pthread_mutex_unlock(&q->mutex);
#else
	SDL_CondSignal(q->cond);
	SDL_UnlockMutex(q->mutex);
#endif
	return 0;
}

/*从队列中获取一个packet*/
int ff_queue_packet_get(PacketQueue *q, AVPacket *pkt, int block) {
	AVPacketList *pkt1;
	int ret;

#ifndef SDL_MUTEX
	pthread_mutex_lock(&q->mutex);
#else
	SDL_LockMutex(q->mutex);
#endif
	for (;;) {
		if (q->quit_flag) {
			ret = -1;
			break;
		}
		pkt1 = q->first_pkt;
		if (pkt1) {
			q->first_pkt = pkt1->next;
			if (!q->first_pkt)
				q->last_pkt = NULL;
			q->nb_packets--;
			q->size -= pkt1->pkt.size;
			*pkt = pkt1->pkt;
			av_free(pkt1);
			ret = 1;
			break;
		}else if (!block) {
			ret = 0;
			break;
		}else {
			/*百度百科:在调用pthread_cond_wait()前必须由本线程加锁(pthread_mutex_lock()),
			 * 而在更新条件等待队列以前,mutex保持锁定状态,并在线程挂起进入等待前解锁。
			 * 在条件满足从而离开pthread_cond_wait()之前,mutex将被重新加锁,
			 * 以与进入pthread_cond_wait()前的加锁动作对应*/
#ifndef SDL_MUTEX
			pthread_cond_wait(&q->cond, &q->mutex);
#else
			SDL_CondWait(q->cond, q->mutex);
#endif
		}
	}
#ifndef SDL_MUTEX
	pthread_mutex_unlock(&q->mutex);
#else
	SDL_UnlockMutex(q->mutex);
#endif
	return ret;
}


/* Discard every queued packet and reset the queue to empty.  The lock and
 * condition variable are left intact, so the queue stays usable. */
void packet_queue_flush(PacketQueue *q) {
	AVPacketList *cur;
	AVPacketList *next;

#ifndef SDL_MUTEX
	pthread_mutex_lock(&q->mutex);
#else
	SDL_LockMutex(q->mutex);
#endif

	/* Walk the list, freeing each packet payload and its node */
	cur = q->first_pkt;
	while (cur != NULL) {
		next = cur->next;
		av_free_packet(&cur->pkt);
		av_freep(&cur);
		cur = next;
	}
	q->first_pkt = NULL;
	q->last_pkt = NULL;
	q->nb_packets = 0;
	q->size = 0;

#ifndef SDL_MUTEX
	pthread_mutex_unlock(&q->mutex);
#else
	SDL_UnlockMutex(q->mutex);
#endif
}




代码片段

#ifndef __FF_QUEUE_H__
#define __FF_QUEUE_H__

#ifndef SDL_MUTEX
#include <pthread.h>
#else
#include <SDL/SDL_thread.h>
#endif

#include "libavformat/avformat.h"

/* Thread-safe FIFO of AVPackets, guarded by either a pthread or an SDL
 * mutex/condvar depending on SDL_MUTEX. */
typedef struct PacketQueue {
  AVPacketList *first_pkt, *last_pkt;   /* linked-list head and tail */
  int nb_packets;                       /* number of queued packets */
  int size;                             /* total queued payload bytes */
#ifndef SDL_MUTEX
  pthread_mutex_t mutex;
  pthread_cond_t  cond;
#else
  SDL_mutex *mutex;
  SDL_cond *cond;
#endif
  int quit_flag;  /* set to wake and fail all blocked getters */
} PacketQueue;

/* NOTE: these prototypes previously declared packet_queue_init/put/get,
 * which do not exist; the implementation defines the ff_queue_* names
 * below, and callers were relying on implicit declarations. */

/* Initialize the queue (lock, condvar, empty list). */
void ff_queue_init(PacketQueue *q);

/* Set the quit flag and wake blocked consumers. */
void ff_queue_set_quit(PacketQueue *q);

/* Append one packet.  Returns 0 on success, -1 on allocation failure. */
int ff_queue_packet_put(PacketQueue *q, AVPacket *pkt);

/* Pop one packet; `block` selects blocking behavior.
 * Returns 1 = got packet, 0 = empty (non-blocking), -1 = quitting. */
int ff_queue_packet_get(PacketQueue *q, AVPacket *pkt, int block);

/* Discard all queued packets. */
void packet_queue_flush(PacketQueue *q);


#endif

代码片段

#include "ff_play.h"
#include <stdio.h>
#include <SDL/SDL.h>
#include <pthread.h>

#ifdef ANDROID_LOG
#include <android/log.h>
#define  LOGE(format,...)  __android_log_print(ANDROID_LOG_ERROR,"hello_hl","file[%s] line[%d] "format"",__FILE__, __LINE__ ,##__VA_ARGS__)
#else
#define  LOGE(format,...)  printf("file[%s] line[%d] "format"\n",__FILE__, __LINE__ ,##__VA_ARGS__)
#endif

#define BITS_PER_PIXEL 32
/*下面的bufsize要给足,不然声音会截断而失真,但给的大了声音播放又会变慢了,一般来说 4608/2 左右最好*/
#define SDL_AUDIO_BUFFER_SIZE 1024*2

int VideoWidth=0;
int VideoHeight=0;
SDL_Surface *Screen;
/*线程定义*/
pthread_t Task_Video =0;
pthread_t Task_Audio =0;
int Isrun_Video =1;
#define RUNNING 1
#define PAUSE 2
#define STOP 0
#define EXIT -1

/*初始化 open file , get stream, open codec*/
int init_stream(const char* path) {
	int res = 1, i;
	int numBytes;
	char pathStr[100]={0};
	
	ff_play_register_all();
	strcpy(pathStr,path);
	/*open and init ctx */
	res = ff_play_open_and_initctx(pathStr);
	if(res < 0){
		LOGE("_open_and_initctx res[%d]",res);
		return res;
	}
	/*启动读线程*/
	res = ff_play_begin_read_thread();
	VideoWidth =ff_play_getvideo_width();
	VideoHeight=ff_play_getvideo_height();
	LOGE("begin thread res[%d], videowidth[%d], height[%d]",res,VideoWidth,VideoHeight);
	return res;
}

/*视频线程*/
void* my_play_video_thread(void *arg) {
	int i,res=-1;
	int ret;
	double pkt_pts;
	while(Isrun_Video){
		if(Isrun_Video == PAUSE) continue;
		SDL_LockSurface(Screen);
		VideoWidth =ff_play_getvideo_width();
		VideoHeight=ff_play_getvideo_height();
		/*point frame->data to pixels; if already, do nothing*/
		ret = ff_play_picture_fill(Screen->pixels,VideoWidth, VideoHeight,PIX_FMT_BGRA);
		if(ret <0){
			return NULL;
		}
		ff_play_getvideopkt2display(NULL);
		SDL_UnlockSurface(Screen);
		SDL_UpdateRect(Screen, 0, 0, 0, 0);
	}
	Isrun_Video = EXIT;
	return NULL;
}

/*这个是从网上拷贝的加了缓冲区的音频回调函数, 说实话播放效果没听出有什么不同,不过避免了因为SDL
音频缓冲区过小而导致的段错误*/
void  my_play_sdl_audio_callback2(void *userdata, Uint8 *stream, int max_len) {

	AVCodecContext *aCodecCtx = (AVCodecContext *) userdata;
	int len, len1, audio_size;
	static uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
	static unsigned int audio_buf_size = 0;
	static unsigned int audio_buf_index = 0;
	len = max_len;
	while (len > 0) {
		if (audio_buf_index >= audio_buf_size) {
			/* We have already sent all our data; get more */
			audio_size = ff_play_getaudiopkt2play((void *) audio_buf, max_len);
			if (audio_size <= 0) {
				/* If error, output silence */
				audio_buf_size = 1024; // arbitrary?
				memset(audio_buf, 0, audio_buf_size);
			} else {
				audio_buf_size = audio_size;
			}
			audio_buf_index = 0;
		}
		len1 = audio_buf_size - audio_buf_index;
		if (len1 > len)
			len1 = len;
		SDL_MixAudio(stream,audio_buf,len1,SDL_MIX_MAXVOLUME);
		//memcpy(stream, (uint8_t *) audio_buf + audio_buf_index, len1);
		len -= len1;
		stream += len1;
		audio_buf_index += len1;
	}
}

/* Toggle between RUNNING and PAUSE: pause/resume the SDL audio device and
 * update the shared video-thread state. */
void my_play_pause(){
	/* FIX: the old `static FS_VideoState = RUNNING;` relied on implicit
	 * int, which is invalid C99/C11. */
	static int FS_VideoState = RUNNING;
	FS_VideoState = (FS_VideoState == PAUSE ? RUNNING : PAUSE);
	/* FIX: SDL_PauseAudio takes a boolean pause_on flag, not a duration;
	 * the old code passed 1000000 "microseconds" (which only worked
	 * because any non-zero value pauses). */
	SDL_PauseAudio(FS_VideoState == PAUSE ? 1 : 0);
	Isrun_Video = FS_VideoState;
}


int main(int argc, void* argv[]){
	int i=0,res=0,x,y;
	char str_path[100]={0};
	SDL_Rect rect;
	SDL_AudioSpec   wanted_spec, spec;
	SDL_Event      event;

	/*打开文件*/
	strcpy(str_path,argv[1]);
	res = init_stream(str_path);
	if(res <0 ){
		fprintf(stderr, "open file fail code[%d]", res);
		return 0;
	}
	LOGE("open file ok!!");

	/*初始化视频*/
	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
		fprintf(stderr, "Could not 	initialize SDL - %s", SDL_GetError());
		exit(1);
	}
	LOGE("init video ok!!");
	Screen = SDL_SetVideoMode(VideoWidth,VideoHeight, BITS_PER_PIXEL, SDL_HWSURFACE);
	if(!Screen) { 
		fprintf(stderr, "SDL: could not set video mode - exiting");
		exit(1);
	}
	LOGE("Screen ok BitsPerPixel[%d], BytesPerPixel[%d], pitch[%d] x[%d]y[%d]w[%d]h[%d]!!! ",
			Screen->format->BitsPerPixel, Screen->format->BytesPerPixel, Screen->pitch,
			Screen->clip_rect.x, Screen->clip_rect.y, Screen->clip_rect.w, Screen->clip_rect.h);
	LOGE("video is ready !!\n");

	/*初始化音频,音频需要流的信息*/
	wanted_spec.freq = ff_play_getaudio_samplerate();
	wanted_spec.format = AUDIO_S16LSB;
	wanted_spec.channels = ff_play_getaudio_channels();
	wanted_spec.silence = 0;
	wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
	wanted_spec.callback = my_play_sdl_audio_callback2;
	wanted_spec.userdata = NULL;
	LOGE("samplerate[%d],channle[%d]", wanted_spec.freq,wanted_spec.channels);
	if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
	            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
	            return -1;
	}
	LOGE("ready to play!!");

	/*let's play*/
	SDL_PauseAudio(0);
	res = pthread_create(&Task_Video, NULL, my_play_video_thread, NULL);
	while(1){
		SDL_PollEvent(&event);
		switch(event.type) {
			case SDL_QUIT:
				Isrun_Video = STOP;
				while(Isrun_Video != EXIT);
				SDL_CloseAudio();
				ff_play_FreeAll();
				SDL_Quit();
				return 0;
			case SDL_KEYDOWN:
				switch (event.key.keysym.sym) {
				case SDLK_RIGHT:
					ff_play_jump(1);
//					LOGE("SDLK_RIGHT DOWN!!!!!");
					break;
				case SDLK_LEFT:
					ff_play_jump(-1);
//					LOGE("SDLK_LEFT DOWN!!!!!");
					break;
				case SDLK_UP:
//					LOGE("SDLK_UP DOWN!!!!!");
					break;
				case SDLK_DOWN:
//					LOGE("SDLK_DOWN DOWN!!!!!");
					break;
				case SDLK_SPACE:
					my_play_pause();

				}break;
			default:
				break;
		}
	}
	return 0;
}

代码片段

#include <string.h>
#include <jni.h>
#include <time.h>
#include <android/log.h>
#include <android/bitmap.h>
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "ff_play.h"

#ifdef ANDROID_LOG
#include <android/log.h>
#define LOGI(format,...)  //__android_log_print(ANDROID_LOG_INFO ,"hello_hl","file[%s] line[%d] "format"",__FILE__, __LINE__ ,##__VA_ARGS__)
#define LOGE(format,...)  __android_log_print(ANDROID_LOG_ERROR,"hello_hl","file[%s] line[%d] "format"",__FILE__, __LINE__ ,##__VA_ARGS__)
#else
#define LOGI(format,...)  // printf("file[%s] line[%d] "format"\n",__FILE__, __LINE__ ,##__VA_ARGS__)
#define LOGE(format,...)  printf("file[%s] line[%d] "format"\n",__FILE__, __LINE__ ,##__VA_ARGS__)
#endif

#define PRINT_DURATION  print_duration(__FUNCTION__,__LINE__)

unsigned int clk_before=0;
int framecount=0;


/* Log the CPU time elapsed since the previous call, in milliseconds,
 * tagged with the calling function and line (see PRINT_DURATION). */
void print_duration(const char *func, int line){
	clock_t now = clock();
	/* clock() counts in CLOCKS_PER_SEC units; convert properly instead of
	 * assuming 1000 ticks per millisecond as the old code did. */
	LOGE("func[%s] line[%d]  time duration is [%d]", func, line,
			(int)(((long long)((unsigned int)now - clk_before)) * 1000 / CLOCKS_PER_SEC));
	clk_before = (unsigned int)now;
}

/*jstring 转换成string
 * 注意C 和 C++ 的env 不同, C里面要 (*env)->xxx C++里面要env->xxx
 * 这个是通用的 C调用java的方法 就是利用了GetMethodID */
/* Convert a jstring to a freshly malloc'd, NUL-terminated C string by
 * calling String.getBytes("utf-8") through JNI (in C the env is used as
 * (*env)->xxx(env, ...), unlike C++'s env->xxx(...)).
 * Returns NULL for an empty string or on allocation failure.
 * Ownership of the returned buffer transfers to the caller (free()). */
char* jstringTostring(JNIEnv* env, jstring jstr)
{
	char* rtn = NULL;
	jclass clsstring = (*env)->FindClass(env, "java/lang/String");
	jstring strencode = (*env)->NewStringUTF(env, "utf-8");
	jmethodID mid = (*env)->GetMethodID(env, clsstring, "getBytes", "(Ljava/lang/String;)[B");
	jbyteArray barr = (jbyteArray)(*env)->CallObjectMethod(env, jstr, mid, strencode);
	jsize alen = (*env)->GetArrayLength(env, barr);
	/* FIX: the isCopy out-parameter is a jboolean*, pass NULL (the old
	 * JNI_FALSE happened to be 0, but only by accident). */
	jbyte* ba = (*env)->GetByteArrayElements(env, barr, NULL);
	if (alen > 0)
	{
		rtn = (char*)malloc(alen + 1);
		if (rtn != NULL) {   /* FIX: malloc result was previously unchecked */
			memcpy(rtn, ba, alen);
			rtn[alen] = 0;
		}
	}
	(*env)->ReleaseByteArrayElements(env, barr, ba, 0);
	/* Release local refs promptly; native code called in a loop can
	 * otherwise exhaust the local-reference table. */
	(*env)->DeleteLocalRef(env, strencode);
	(*env)->DeleteLocalRef(env, barr);
	(*env)->DeleteLocalRef(env, clsstring);
	return rtn;
}

/*初始化 open file , get stream, open codec*/
/* JNI: open the given path, initialize the decode contexts, and start the
 * reader thread.  Returns 0/positive on success, negative on failure
 * (-1999 when the path string cannot be converted). */
jint Java_com_example_player_PlayActivity_InitStream(JNIEnv* env,
		jobject this, jstring path) {
	int res;
	char *pathStr;

	/* Shared registration helper (same sequence the SDL front-end uses) */
	ff_play_register_all();
	if ((pathStr = jstringTostring(env, path)) == NULL)
		return -1999;
	/* Opens the input AND allocates the decode frames internally.
	 * LEAK FIX: the extra ff_play_allocFrame() call that used to follow
	 * re-allocated all three frames, leaking the first set every open. */
	res = ff_play_open_and_initctx(pathStr);
	/* NOTE(review): pathStr is malloc'd by jstringTostring and never freed
	 * here — add free(pathStr) once <stdlib.h> is included in this file. */
	if (res < 0) {
		LOGE("_open_and_initctx res[%d]", res);
		return res;
	}
	/* Start the demuxer thread */
	res = ff_play_begin_read_thread();
	LOGE("begin thread res[%d]", res);
	return res;
}

/* JNI: frame height in pixels (0 when no video stream is open). */
jint Java_com_example_player_PlayActivity_getFrameHeight(JNIEnv * env, jobject this){
	jint height = ff_play_getvideo_height();
	return height;
}

/* JNI: frame width in pixels (0 when no video stream is open). */
jint Java_com_example_player_PlayActivity_getFrameWidth(JNIEnv * env, jobject this){
	jint width = ff_play_getvideo_width();
	return width;
}

/* JNI: release every native resource held by the player. */
jint Java_com_example_player_PlayActivity_frameFree(JNIEnv * env, jobject this){
	jint rc = ff_play_FreeAll();
	return rc;
}

/*TODO 根据流的内容绘图*/
void Java_com_example_player_PlayActivity_drawFrame(JNIEnv * env, jobject this,
		jstring bitmap) {
	AndroidBitmapInfo info;
	void* pixels;
	int ret;
	uint8_t *pktdata;
	int pktsize;
	int len_get = AVCODEC_MAX_AUDIO_FRAME_SIZE*100;
	int data_size=0;
	int handle_finished,frameFinished = 0;
	AVPacket packet;
	static struct SwsContext *img_convert_ctx;
	int64_t seek_target;
	int target_width = 0;
	int target_height = 0;

	if ((ret = AndroidBitmap_getInfo(env, bitmap, &info)) < 0) {
		LOGE("AndroidBitmap_getInfo() failed ! error=%d", ret);
		return;
	}
	LOGE("Checked on the bitmap");

	if ((ret = AndroidBitmap_lockPixels(env, bitmap, &pixels)) < 0) {
		LOGE("AndroidBitmap_lockPixels() failed ! error=%d", ret);
		return;
	}
	LOGE("Grabbed the pixels addr[0x%x]", pixels);

	/*point frame->data to pixels; if already, do nothing*/
	ret = ff_play_picture_fill(pixels,info.width, info.height,PIX_FMT_RGBA);
	if(ret <0){
		AndroidBitmap_unlockPixels(env, bitmap);
		return;
	}
	PRINT_DURATION;
	handle_finished = 0;
	ff_play_getvideopkt2display(NULL);
	AndroidBitmap_unlockPixels(env, bitmap);
}

/* JNI: audio sample rate in Hz (0 when no audio stream is open). */
jint Java_com_example_player_FFAudio_getAudioSamplerate(void) {
	 return ff_play_getaudio_samplerate();
}

/* JNI: decode one audio packet into the caller's byte[] and return the
 * number of PCM bytes written (0 when nothing was decoded or the array
 * could not be pinned). */
jint Java_com_example_player_FFAudio_getAudioBuf(JNIEnv * env, jobject this, jbyteArray jAudioBuf) {
	int data_size = 0;
	jbyte *audio_output;

	/* FIX: pass NULL for the jboolean* isCopy argument and check the
	 * result (GetByteArrayElements can return NULL). */
	audio_output = (*env)->GetByteArrayElements(env, jAudioBuf, NULL);
	if (audio_output == NULL)
		return 0;
	/* max_len 0 = unbounded; the Java side sizes the array appropriately */
	data_size = ff_play_getaudiopkt2play((void *)audio_output, 0);

	(*env)->ReleaseByteArrayElements(env, jAudioBuf, audio_output, 0);
	PRINT_DURATION;
	return data_size;
}




转载注明地址:http://www.chengxuyuans.com/code/C++/63072.html

推荐文章