This article covers pulling an RTSP stream and playing it on the 正点原子 ALPHA Linux development board.

Following on from the previous article, playing video with the FFmpeg API on the 正点原子 Linux board, the program can now pull a stream over RTSP and play it.

Demo video: the RTSP playback result is on Bilibili.
Netdisk link:
Link: https://pan.baidu.com/s/1ix5OoGJb877tryAETQRMgw
Extraction code: jc05

The code in the previous article had a memory leak: VideoConvert() allocated a frame structure on every call, and I had not worked out which API releases it. Now the frame is allocated only once, before playback starts, and is passed into the decode path as a parameter instead of being allocated per frame. A leak still remains, roughly 1 MB per minute; I'll come back to it later.
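
For reference, a frame set up the way init_outframe_rgba() below sets it up (pixel buffer from av_image_alloc(), AVFrame from av_frame_alloc()) can be released with av_freep() on data[0] followed by av_frame_free(). A minimal sketch of a matching cleanup helper; the name free_outframe_rgba is mine and not part of the original code:

#include "libavutil/frame.h"
#include "libavutil/mem.h"

/* Hypothetical helper: releases a frame whose buffer came from av_image_alloc()
 * and whose AVFrame came from av_frame_alloc(), as in init_outframe_rgba() below. */
static void free_outframe_rgba(AVFrame **ppFrame)
{
    if (ppFrame == NULL || *ppFrame == NULL)
        return;
    av_freep(&(*ppFrame)->data[0]); /* frees the av_image_alloc() buffer */
    av_frame_free(ppFrame);         /* frees the AVFrame itself and sets *ppFrame to NULL */
}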

A remaining problem: with a high-bitrate stream the picture smears, so the demo video just plays a clock display, where only a small region of the image changes. After four or five minutes of playback parts of the picture also break up. I'll tackle that once I understand more; for now the goal is only to get the code path working end to end.
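
A common cause of this kind of smearing over RTSP is packet loss on the default UDP transport. One thing worth trying (my suggestion, not something verified on this board) is to pass the RTSP options to avformat_open_input() so they actually reach the demuxer; in the code below they are handed to avcodec_open2(), where they have no effect on the transport. A minimal sketch, with a hypothetical helper name:

#include "libavformat/avformat.h"
#include "libavutil/dict.h"

/* Hypothetical helper: open an RTSP input with the demuxer options applied up front. */
static int open_rtsp_input(const char *url, AVFormatContext **ppFmtCtx)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "rtsp_transport", "tcp", 0); /* interleave over TCP instead of UDP */
    av_dict_set(&opts, "stimeout", "20000000", 0);  /* socket timeout in microseconds (20 s) */

    ret = avformat_open_input(ppFmtCtx, url, NULL, &opts);
    av_dict_free(&opts); /* entries the demuxer did not consume are left in opts */
    return ret;          /* 0 on success, a negative AVERROR code on failure */
}

This would replace the plain avformat_open_input() call in video_decode_example() below.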

For better results you can also study the ffplay code; last night ffplay played the RTSP stream smoothly. Its source is fftools/ffplay.c in the FFmpeg tree, more than 3700 lines in total, and I have not worked through it yet.

Building on the previous article, the implementation is as follows. Create a new file, test_004_rtsp.c:

/*
 * Copyright (c) 2015 Ludmila Glinskih
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * H264 pAVCodec test.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/fb.h>

#include "libavutil/adler32.h"
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavutil/imgutils.h"

#include "libavfilter/avfilter.h"
#include "libavutil/avutil.h"
#include "libavutil/pixfmt.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"


int fbfd = 0;
static unsigned int *fbp = NULL;
struct fb_var_screeninfo vinfo;
struct fb_fix_screeninfo finfo;
int scrWid = 0;
int scrHeg = 0;

int open_fb()
{
    unsigned int screen_size;
    /* open the framebuffer device */
    if (0 > (fbfd = open("/dev/fb0", O_RDWR)))
    {
        perror("open error");
        exit(EXIT_FAILURE);
    }

    /* read the screen parameters */
    ioctl(fbfd, FBIOGET_VSCREENINFO, &vinfo);
    ioctl(fbfd, FBIOGET_FSCREENINFO, &finfo);

    screen_size = finfo.line_length * vinfo.yres;
    scrWid = vinfo.xres;
    scrHeg = vinfo.yres;

    /* map the display buffer into the process address space */
    fbp = mmap(NULL, screen_size, PROT_WRITE, MAP_SHARED, fbfd, 0);
    if (MAP_FAILED == (void *)fbp)
    {
        perror("mmap error");
        close(fbfd);
        exit(EXIT_FAILURE);
    }

    printf("scrWid:%d scrHeg:%d\n", scrWid, scrHeg);

    return 0;
}

void close_fb(void)
{
    // unmap and close the framebuffer device
    munmap(fbp, finfo.smem_len);
    close(fbfd);
}

#define argb8888_to_rgba888(color) ({     \
    unsigned int temp = (color);          \
    ((temp & 0xff0000UL) >> 16) |         \
        ((temp & 0xff00UL) >> 0) |        \
        ((temp & 0xffUL) << 16);          \
})

/********************************************************************
 * Function    : lcd_draw_point
 * Description : write one pixel into the framebuffer
 * Parameters  : x, y, color (32-bit pixel value, written as-is)
 * Return      : none
 ********************************************************************/
static void lcd_draw_point(unsigned int x, unsigned int y, unsigned int color)
{
    /* skip pixels that fall outside the visible screen area */
    if (x >= (unsigned int)scrWid || y >= (unsigned int)scrHeg)
        return;

    /* fill in the pixel */
    fbp[y * scrWid + x] = color;
}

void draw_point(int x, int y, uint8_t *color)
{
    lcd_draw_point(x, y, *(uint32_t *)color);
}

void clr_scr(int w, int h)
{
    static int cnt = 0;
    printf("clr scr:%dn", cnt);
    cnt++;

    char clor[4] = {0xff, 0xff, 0xff};

    for (int i = 0; i < h; i++)
        for (int j = 0; j < w; j++)
            draw_point(j, i, clor);
}

int init_outframe_rgba(
        AVFrame **ppOutFrame,
        enum AVPixelFormat eOutFormat, // output pixel format
        int32_t nOutWidth,             // output width
        int32_t nOutHeight)            // output height
{
    AVFrame *pOutFrame = NULL;
    // Allocate the output frame object and its pixel buffer (done once, before playback)
    uint8_t *data[4] = {NULL};
    int linesize[4] = {0};
    int res = av_image_alloc(data, linesize, nOutWidth, nOutHeight, eOutFormat, 1);
    if (res < 0)
    {
        printf("<init_outframe_rgba> [ERROR] fail to av_image_alloc(), res=%d\n", res);
        return -2;
    }
    pOutFrame = av_frame_alloc();
    pOutFrame->format = eOutFormat;
    pOutFrame->width = nOutWidth;
    pOutFrame->height = nOutHeight;
    pOutFrame->data[0] = data[0];
    pOutFrame->data[1] = data[1];
    pOutFrame->data[2] = data[2];
    pOutFrame->data[3] = data[3];
    pOutFrame->linesize[0] = linesize[0];
    pOutFrame->linesize[1] = linesize[1];
    pOutFrame->linesize[2] = linesize[2];
    pOutFrame->linesize[3] = linesize[3];
    (*ppOutFrame) = pOutFrame;

    return 0;
}

int32_t VideoConvert(
    const AVFrame *pInFrame,       // input frame
    enum AVPixelFormat eOutFormat, // output pixel format
    int32_t nOutWidth,             // output width
    int32_t nOutHeight,            // output height
    AVFrame **ppOutFrame)          // output frame, pre-allocated by the caller
{
    struct SwsContext *pSwsCtx;
    AVFrame *pOutFrame = *ppOutFrame;

    // Create the format converter; choose the scaling algorithm, no extra filtering
    pSwsCtx = sws_getContext(pInFrame->width, pInFrame->height, (enum AVPixelFormat)pInFrame->format,
                             nOutWidth, nOutHeight, eOutFormat,
                             SWS_BICUBIC, NULL, NULL, NULL);
    if (pSwsCtx == NULL)
    {
        printf("<VideoConvert> [ERROR] fail to sws_getContext()n");
        return -1;
    }

    
    int res = 0;
    // Run the pixel-format conversion
    res = sws_scale(pSwsCtx,
                    (const uint8_t *const *)(pInFrame->data),
                    pInFrame->linesize,
                    0,
                    pOutFrame->height,
                    pOutFrame->data,
                    pOutFrame->linesize);
    if (res < 0)
    {
        printf("<VideoConvert> [ERROR] fail to sws_scale(), res=%d\n", res);
        sws_freeContext(pSwsCtx);
        return -3; // the output frame is owned by the caller, so it is not freed here
    }

    
    sws_freeContext(pSwsCtx); // release the converter
    return 0;
}

static int video_decode_example(const char *url_rtsp)
{
    AVDictionary *pAVDictionary = 0;
    AVCodec *pAVCodec = NULL;
    AVCodecContext *pAVCodecContext = NULL;
    AVCodecParameters *origin_par = NULL;
    AVFrame *pAVFrame = NULL;
    AVFrame *pAVFrameRGB32 = NULL;
    AVStream *pAVStream = NULL;                        // the selected video stream
    uint8_t *byte_buffer = NULL;
    AVPacket *pAVPacket = av_packet_alloc();
    AVFormatContext *pAVFormatContext = NULL;
    int number_of_written_bytes;
    int video_stream;
    int got_frame = 0;
    int byte_buffer_size;
    int i = 0;
    int result;
    int end_of_stream = 0;

    av_log(NULL, AV_LOG_ERROR, "enter videon");

    pAVFormatContext = avformat_alloc_context(); // allocate an AVFormatContext and fill it with default values

    result = avformat_open_input(&pAVFormatContext, url_rtsp, NULL, NULL);
    if (result < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Can't open file, res:%dn", result);
        return result;
    }
    av_log(NULL, AV_LOG_ERROR, "open video file okn");

    result = avformat_find_stream_info(pAVFormatContext, NULL);
    if (result < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Can't get stream infon");
        return result;
    }

    av_log(NULL, AV_LOG_ERROR, "get stream infon");
    video_stream = av_find_best_stream(pAVFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (video_stream < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Can't find video stream in input filen");
        return -1;
    }

    av_log(NULL, AV_LOG_ERROR, "get video stream infon");

    pAVCodecContext = pAVFormatContext->streams[video_stream]->codec;

    pAVCodec = avcodec_find_decoder(pAVCodecContext->codec_id);
    if (!pAVCodec)
    {
        av_log(NULL, AV_LOG_ERROR, "Can't find decodern");
        return -1;
    }
    av_log(NULL, AV_LOG_ERROR, "get video codec n");

    // Demuxer/RTSP options. Note: to affect the RTSP transport these normally have to be
    // passed to avformat_open_input(); here they go to avcodec_open2() as in the original
    // flow, where the decoder simply ignores the ones it does not recognize.
    av_dict_set(&pAVDictionary, "buffer_size", "4096000", 0);   // receive buffer size, bytes
    av_dict_set(&pAVDictionary, "stimeout", "20000000", 0);     // socket timeout, microseconds (20 s)
    av_dict_set(&pAVDictionary, "max_delay", "90000000", 0);    // maximum demux delay, microseconds
    av_dict_set(&pAVDictionary, "rtsp_transport", "tcp", 0);    // transport: tcp/udp

    result = avcodec_open2(pAVCodecContext, pAVCodec, &pAVDictionary);
    if (result < 0)
    {
        av_log(pAVCodecContext, AV_LOG_ERROR, "Can't open decodern");
        return result;
    }
    av_log(NULL, AV_LOG_ERROR, "open video codec yesn");

    pAVStream = pAVFormatContext->streams[video_stream];
    // Print the codec/stream parameters
    printf("bit rate: %lld\n", (long long)pAVCodecContext->bit_rate);
    printf("size: %d x %d\n", pAVCodecContext->width, pAVCodecContext->height);
    printf("pixel format: %d\n", pAVCodecContext->pix_fmt); // AV_PIX_FMT_YUV420P == 0
    printf("codec time base: %d/%d\n", pAVCodecContext->time_base.num, pAVCodecContext->time_base.den);
    printf("avg frame rate: %d/%d\n", pAVStream->avg_frame_rate.num, pAVStream->avg_frame_rate.den);
    printf("duration: %.2f s\n", pAVStream->duration * av_q2d(pAVStream->time_base));
    printf("frame count: %lld\n", (long long)pAVStream->nb_frames);

    pAVFrame = av_frame_alloc();
    if (!pAVFrame)
    {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate framen");
        return AVERROR(ENOMEM);
    }

    int out_w = pAVCodecContext->width;
    int out_h = pAVCodecContext->height;
    result = init_outframe_rgba(&pAVFrameRGB32, AV_PIX_FMT_BGRA, out_w, out_h);
    if (result < 0)
    {
        av_log(pAVCodecContext, AV_LOG_ERROR, "init outfram_rgb failedn");
        return result;
    }

    printf("#tb %d: %d/%dn", video_stream, pAVFormatContext->streams[video_stream]->time_base.num,
           pAVFormatContext->streams[video_stream]->time_base.den);
    i = 0;

    av_init_packet(pAVPacket);

    while (1)
    {
        result = av_read_frame(pAVFormatContext, pAVPacket);
        if (result >= 0)
        {
            if (pAVPacket->stream_index == video_stream)
            {
                // Decode the packet we just read
                result = avcodec_send_packet(pAVCodecContext, pAVPacket);
                if (result)
                {
                    printf("Failed to avcodec_send_packet(pAVCodecContext, pAVPacket) ,ret =%d", result);
                    break;
                }
                while (!avcodec_receive_frame(pAVCodecContext, pAVFrame))
                {
                   
                    VideoConvert(pAVFrame, AV_PIX_FMT_BGRA, out_w, out_h, &pAVFrameRGB32);

                    // Copy the converted BGRA frame to the LCD, pixel by pixel
                    for (int h = 0; h < out_h; h++)
                        for (int w = 0; w < out_w; w++)
                        {
                            draw_point(w, h, pAVFrameRGB32->data[0] + h * pAVFrameRGB32->linesize[0] + w * 4);
                        }
                    printf("draw one pic\n");
                }

            }
            // Release this packet's payload on every iteration; without this the
            // buffers handed out by av_read_frame() are never freed and memory keeps growing.
            av_packet_unref(pAVPacket);
        }
        else
        {
            av_log(NULL, AV_LOG_ERROR, "av_read_frame failed, res:%d\n", result);
            break; // leave the loop on read error / end of stream
        }
    }

    av_packet_free(&pAVPacket);
    av_frame_free(&pAVFrame);
    av_freep(&pAVFrameRGB32->data[0]); // buffer allocated by av_image_alloc()
    av_frame_free(&pAVFrameRGB32);
    avcodec_close(pAVCodecContext);
    avformat_close_input(&pAVFormatContext); // also releases the per-stream codec context
    return 0;
}

int main(int argc, char **argv)
{
    if (argc < 2)
    {
        av_log(NULL, AV_LOG_ERROR, "Incorrect inputn");
        return 1;
    }
    avcodec_register_all();
    printf("reigister net workn");

    avformat_network_init();
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
    printf("reigister filtern");
    avfilter_register_all();

    av_register_all();
    open_fb();
    clr_scr(scrWid, scrHeg);
    usleep(1000 * 1000 * 1);

    printf("video file :%sn", argv[1]);
    if (video_decode_example(argv[1]) != 0)
        return 1;

    close_fb();
    return 0;
}

The Makefile is as follows:

FFMPEG=/home/shengy/alpha_build/
CC=arm-linux-gnueabihf-gcc

CFLAGS=-g -I$(FFMPEG)/include

LDFLAGS = -L$(FFMPEG)/lib/  -lswresample -lavformat -lavdevice -lavcodec -lavutil -lswscale -lavfilter -lm
TARGETS=test_004_rtsp

all:$(TARGETS)
	
test_004_rtsp:test_004_rtsp.c
	$(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS) -std=c99  
	
clean:
	rm -rf $(TARGETS)
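
To build and run: run make with the cross-compiler on the PC, copy test_004_rtsp (together with the FFmpeg shared libraries, if they are not already on the rootfs) to the board, and start it with the RTSP URL as the only argument, for example ./test_004_rtsp rtsp://<camera-address>/stream (the URL is just a placeholder for your own camera or server).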

Original article: https://blog.csdn.net/qq_41852100/article/details/135991867
