Category: Network & Security

2014-02-05 22:41:13

An earlier post analyzed RTMPDump's main() function. A crucial prerequisite for fetching RTMP stream data there is parsing the RTMP URL; without that step, no matter how powerful the rest of the program is, it gets nowhere.
This post walks through the function that does it: RTMP_ParseURL().


First, a quick review of the format of an RTMP URL:


rtmp://localhost/vod/mp4:sample1_1500kbps.f4v
Everything before "://" is the protocol, which can be rtmp, rtmpt, rtmps, and so on.


Next comes the server address ("localhost" here).


Then an optional port number (default 1935).


Then the application name, here "vod".


Finally the path of the media file, here "mp4:sample1_1500kbps.f4v".


That is enough about the URL itself; see the relevant documentation for details. Below is the annotated code (the whole of parseurl.c):
/*
 * This file mainly implements parsing of the input URL.
 */
#include "stdafx.h"
#include <stdlib.h>
#include <string.h>


#include <assert.h>
#include <ctype.h>


#include "rtmp_sys.h"
#include "log.h"


/* Parse the URL and extract the protocol, host name (host), port,
 * playpath and application name (app).
 */
int RTMP_ParseURL(const char *url, int *protocol, AVal *host, unsigned int *port,
  AVal *playpath, AVal *app)
{
  char *p, *end, *col, *ques, *slash;


  RTMP_Log(RTMP_LOGDEBUG, "Parsing...");


  *protocol = RTMP_PROTOCOL_RTMP;
  *port = 0;
  playpath->av_len = 0;
  playpath->av_val = NULL;
  app->av_len = 0;
  app->av_val = NULL;


  /* String parsing */
  /* Look for "://" */
  //Prototype: char *strstr(char *str1, char *str2);
  //Purpose: find the first occurrence of str2 within str1 (not counting str2's terminating NUL).
  //Return value: a pointer to that position, or NULL if str2 does not occur in str1.
  p = strstr((char *)url, "://");
  if(!p) {
    RTMP_Log(RTMP_LOGERROR, "RTMP URL: No :// in url!");
    return FALSE;
  }
  {
  //Pointer subtraction gives len, the length of the part before "://"
  int len = (int)(p-url);
  //Determine which protocol is being used
  //by comparing strings
  if(len == 4 && strncasecmp(url, "rtmp", 4)==0)
    *protocol = RTMP_PROTOCOL_RTMP;
  else if(len == 5 && strncasecmp(url, "rtmpt", 5)==0)
    *protocol = RTMP_PROTOCOL_RTMPT;
  else if(len == 5 && strncasecmp(url, "rtmps", 5)==0)
          *protocol = RTMP_PROTOCOL_RTMPS;
  else if(len == 5 && strncasecmp(url, "rtmpe", 5)==0)
          *protocol = RTMP_PROTOCOL_RTMPE;
  else if(len == 5 && strncasecmp(url, "rtmfp", 5)==0)
          *protocol = RTMP_PROTOCOL_RTMFP;
  else if(len == 6 && strncasecmp(url, "rtmpte", 6)==0)
          *protocol = RTMP_PROTOCOL_RTMPTE;
  else if(len == 6 && strncasecmp(url, "rtmpts", 6)==0)
          *protocol = RTMP_PROTOCOL_RTMPTS;
  else {
    RTMP_Log(RTMP_LOGWARNING, "Unknown protocol!\n");
    goto parsehost;
  }
  }


  RTMP_Log(RTMP_LOGDEBUG, "Parsed protocol: %d", *protocol);


parsehost:
  //Get the host name
  //Skip past "://"
  p+=3;


  /* Check that a host name is present */
  if(*p==0) {
    RTMP_Log(RTMP_LOGWARNING, "No hostname in URL!");
    return FALSE;
  }
  //Prototype: char *strchr(const char *s, char c);
  //Purpose: find the first occurrence of character c in string s.
  //Returns a pointer to that first occurrence, or NULL if c does not appear in s.
  end   = p + strlen(p);//pointer to the end of the string
  col   = strchr(p, ':');//pointer to the (first) colon
  ques  = strchr(p, '?');//pointer to the (first) question mark
  slash = strchr(p, '/');//pointer to the (first) slash


  {
  int hostlen;
  if(slash)
    hostlen = slash - p;
  else
    hostlen = end - p;
  if(col && col -p < hostlen)
    hostlen = col - p;


  if(hostlen < 256) {
    host->av_val = p;
    host->av_len = hostlen;
    RTMP_Log(RTMP_LOGDEBUG, "Parsed host    : %.*s", hostlen, host->av_val);
  } else {
    RTMP_Log(RTMP_LOGWARNING, "Hostname exceeds 255 characters!");
  }


  p+=hostlen;
  }


  /* Get the port number */
  if(*p == ':') {
    unsigned int p2;
    p++;
    p2 = atoi(p);
    if(p2 > 65535) {
      RTMP_Log(RTMP_LOGWARNING, "Invalid port number!");
    } else {
      *port = p2;
    }
  }


  if(!slash) {
    RTMP_Log(RTMP_LOGWARNING, "No application or playpath in URL!");
    return TRUE;
  }
  p = slash+1;


  {
  /* Get the application (app)
   *
   * rtmp://host[:port]/app[/appinstance][/...]
   * application = app[/appinstance]
   */


  char *slash2, *slash3 = NULL;//pointers to the second and third slashes
  int applen, appnamelen;


  slash2 = strchr(p, '/');//points at the second slash
  if(slash2)
    slash3 = strchr(slash2+1, '/');//points at the third slash; searching from slash2+1 starts one character past slash2


  applen = end-p; /* ondemand, pass all parameters as app */
  appnamelen = applen; /* ondemand length */


  if(ques && strstr(p, "slist=")) { 
    /* whatever it is, the '?' and slist= means we need to use everything as app and parse plapath from slist= */
    appnamelen = ques-p;
  }
  else if(strncmp(p, "ondemand/", 9)==0) {
                /* app = ondemand/foobar, only pass app=ondemand */
                applen = 8;
                appnamelen = 8;
  } else { 
    /* app!=ondemand, so app is app[/appinstance] */
    if(slash3)
      appnamelen = slash3-p;
    else if(slash2)
      appnamelen = slash2-p;


    applen = appnamelen;
  }


  app->av_val = p;
  app->av_len = applen;
  RTMP_Log(RTMP_LOGDEBUG, "Parsed app     : %.*s", applen, p);


  p += appnamelen;
  }


  if (*p == '/')
    p++;


  if (end-p) {
    AVal av = {p, end-p};
    RTMP_ParsePlaypath(&av, playpath);
  }


  return TRUE;
}


/*
 * Extract the playpath from the URL. The playpath is the part of the URL that follows "rtmp://host:port/app/".
 *
 * Produce a playpath that FMS can recognize:
 * mp4 streams: prepend "mp4:", strip the extension
 * mp3 streams: prepend "mp3:", strip the extension
 * flv streams: strip the extension
 */
void RTMP_ParsePlaypath(AVal *in, AVal *out) {
  int addMP4 = 0;
  int addMP3 = 0;
  int subExt = 0;
  const char *playpath = in->av_val;
  const char *temp, *q, *ext = NULL;
  const char *ppstart = playpath;
  char *streamname, *destptr, *p;


  int pplen = in->av_len;


  out->av_val = NULL;
  out->av_len = 0;


  if ((*ppstart == '?') &&
      (temp=strstr(ppstart, "slist=")) != 0) {
    ppstart = temp+6;
    pplen = strlen(ppstart);


    temp = strchr(ppstart, '&');
    if (temp) {
      pplen = temp-ppstart;
    }
  }


  q = strchr(ppstart, '?');
  if (pplen >= 4) {
    if (q)
      ext = q-4;
    else
      ext = &ppstart[pplen-4];
    if ((strncmp(ext, ".f4v", 4) == 0) ||
        (strncmp(ext, ".mp4", 4) == 0)) {
      addMP4 = 1;
      subExt = 1;
    /* Only remove .flv from rtmp URL, not slist params */
    } else if ((ppstart == playpath) &&
        (strncmp(ext, ".flv", 4) == 0)) {
      subExt = 1;
    } else if (strncmp(ext, ".mp3", 4) == 0) {
      addMP3 = 1;
      subExt = 1;
    }
  }


  streamname = (char *)malloc((pplen+4+1)*sizeof(char));
  if (!streamname)
    return;


  destptr = streamname;
  if (addMP4) {
    if (strncmp(ppstart, "mp4:", 4)) {
      strcpy(destptr, "mp4:");
      destptr += 4;
    } else {
      subExt = 0;
    }
  } else if (addMP3) {
    if (strncmp(ppstart, "mp3:", 4)) {
      strcpy(destptr, "mp3:");
      destptr += 4;
    } else {
      subExt = 0;
    }
  }


  for (p=(char *)ppstart; pplen >0;) {
    /* skip extension */
    if (subExt && p == ext) {
      p += 4;
      pplen -= 4;
      continue;
    }
    if (*p == '%') {
      unsigned int c;
      sscanf(p+1, "%02x", &c);
      *destptr++ = c;
      pplen -= 3;
      p += 3;
    } else {
      *destptr++ = *p++;
      pplen--;
    }
  }
  *destptr = '\0';


  out->av_val = streamname;
  out->av_len = destptr - streamname;
}
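
To make the parsed fields concrete, here is a minimal, hypothetical caller that feeds the sample URL from the beginning of this post into RTMP_ParseURL() and prints what comes back. It assumes the librtmp headers are on the include path; the error handling choices and the printing are mine, everything else follows the function signature above.

#include <stdio.h>
#include <stdlib.h>
#include "rtmp.h"       /* librtmp public header (assumed); declares RTMP_ParseURL() and AVal */

int main(void)
{
  const char *url = "rtmp://localhost/vod/mp4:sample1_1500kbps.f4v";
  int protocol = 0;
  unsigned int port = 0;
  AVal host, playpath, app;

  if (!RTMP_ParseURL(url, &protocol, &host, &port, &playpath, &app))
    return 1;

  if (port == 0)
    port = 1935;        /* default RTMP port when the URL does not carry one */

  /* host and app point into the original URL string, hence the %.*s format */
  printf("protocol id: %d\n", protocol);
  printf("host       : %.*s\n", host.av_len, host.av_val);
  printf("port       : %u\n", port);
  printf("app        : %.*s\n", app.av_len, app.av_val);
  printf("playpath   : %.*s\n", playpath.av_len, playpath.av_val);

  free(playpath.av_val); /* RTMP_ParsePlaypath() allocates this buffer with malloc() */
  return 0;
}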


Having analyzed the code RTMPDump (libRTMP) uses to parse RTMP URLs, this section takes a brief look at its AMF encoding source.
AMF encoding is used widely in Adobe's Flash and Flex systems, and since RTMP is also an Adobe protocol, it uses AMF for its messages as well.
How AMF is used in detail is not discussed here.
Reference material:
AMF3 introduction (Chinese): http://download.csdn.net/detail/leixiaohua1020/6389977


For RTMPDump to download and save media streamed over RTMP, it must be able to both encode and decode AMF-formatted data.


amf.c is where RTMPDump's AMF handling functions live; its source code is pasted below. I will not explain it in detail for now and may add more later.


#include "stdafx.h"
/*  This file mainly implements operations on AMF objects.
 *-------------------------------------
 *AMF data types:
 *Type    Byte code
 *Number  0x00
 *Boolean 0x01
 *String  0x02
 *Object  0x03
 *MovieClip 0x04
 *Null    0x05
 *Undefined 0x06
 *Reference 0x07
 *MixedArray  0x08
 *EndOfObject 0x09
 *Array     0x0a
 *Date      0x0b
 *LongString  0x0c
 *Unsupported 0x0d
 *Recordset   0x0e
 *XML     0x0f
 *TypedObject (Class instance)  0x10
 *AMF3 data 0x11
 *--------------------------------------
 *Examples:
 *0. Number is a double, encoded as 8 data bytes; e.g. the hex bytes 00 40 10 00 00 00 00 00 00 represent the double 4.0 (type marker 0x00 followed by the big-endian value).
 *1. Boolean corresponds to a bool and uses 1 data byte, much as in C: 00 means false, 01 means true; e.g. the hex bytes 01 01 represent true.
 *2. String consists of 1 type byte, 2 bytes giving the UTF-8 length, and then the UTF-8 content;
 *   e.g. the hex bytes 02 00 08 73 68 61 6E 67 67 75 61 are a string of length 8 whose content 73 68 61 6E 67 67 75 61 reads "shanggua".
 *3. Object corresponds to a hash table whose keys are UTF-8 strings and whose values are other AMF types; the object is terminated by the 3 bytes 00 00 09.
 *5. Null is an empty value occupying a single byte, the Null type marker 0x05.
 *6. Undefined likewise occupies a single byte, 0x06.
 *8. MixedArray is also hash-table-like; unlike Object it declares the size of the table up front.
 */






#include <string.h>
#include <assert.h>
#include <stdlib.h>


#include "rtmp_sys.h"
#include "amf.h"
#include "log.h"
#include "bytes.h"


static const AMFObjectProperty AMFProp_Invalid = { {0, 0}, AMF_INVALID };
static const AVal AV_empty = { 0, 0 };


//Big-Endian
//The most significant byte (MSB) is stored at the lowest address, i.e. high-order bytes sit at low addresses and low-order bytes at high addresses.
//This matches the way people write numbers, unlike the layout used by most PCs.
//Network byte order: the TCP/IP protocols define their byte order as Big-Endian, so the byte order used
//in TCP/IP is usually called network byte order.
//Host byte order: on common x86 hosts this is Little-Endian, so when two such hosts communicate over TCP/IP
//the appropriate functions must be called to convert between host order (Little-Endian) and network order (Big-Endian).




/* AMF data is Big-Endian; the (typical) host is Little-Endian. */


unsigned short
AMF_DecodeInt16(const char *data)
{
  unsigned char *c = (unsigned char *) data;
  unsigned short val;
  val = (c[0] << 8) | c[1];//convert from big-endian to host order
  return val;
}


unsigned int
AMF_DecodeInt24(const char *data)
{
  unsigned char *c = (unsigned char *) data;
  unsigned int val;
  val = (c[0] << 16) | (c[1] << 8) | c[2];
  return val;
}


unsigned int
AMF_DecodeInt32(const char *data)
{
  unsigned char *c = (unsigned char *)data;
  unsigned int val;
  val = (c[0] << 24) | (c[1] << 16) | (c[2] << 8) | c[3];
  return val;
}


void
AMF_DecodeString(const char *data, AVal *bv)
{
  bv->av_len = AMF_DecodeInt16(data);
  bv->av_val = (bv->av_len > 0) ? (char *)data + 2 : NULL;
}


void
AMF_DecodeLongString(const char *data, AVal *bv)
{
  bv->av_len = AMF_DecodeInt32(data);
  bv->av_val = (bv->av_len > 0) ? (char *)data + 4 : NULL;
}


double
AMF_DecodeNumber(const char *data)
{
  double dVal;
#if __FLOAT_WORD_ORDER == __BYTE_ORDER
#if __BYTE_ORDER == __BIG_ENDIAN
  memcpy(&dVal, data, 8);
#elif __BYTE_ORDER == __LITTLE_ENDIAN
  unsigned char *ci, *co;
  ci = (unsigned char *)data;
  co = (unsigned char *)&dVal;
  co[0] = ci[7];
  co[1] = ci[6];
  co[2] = ci[5];
  co[3] = ci[4];
  co[4] = ci[3];
  co[5] = ci[2];
  co[6] = ci[1];
  co[7] = ci[0];
#endif
#else
#if __BYTE_ORDER == __LITTLE_ENDIAN /* __FLOAT_WORD_ORER == __BIG_ENDIAN */
  unsigned char *ci, *co;
  ci = (unsigned char *)data;
  co = (unsigned char *)&dVal;
  co[0] = ci[3];
  co[1] = ci[2];
  co[2] = ci[1];
  co[3] = ci[0];
  co[4] = ci[7];
  co[5] = ci[6];
  co[6] = ci[5];
  co[7] = ci[4];
#else /* __BYTE_ORDER == __BIG_ENDIAN && __FLOAT_WORD_ORER == __LITTLE_ENDIAN */
  unsigned char *ci, *co;
  ci = (unsigned char *)data;
  co = (unsigned char *)&dVal;
  co[0] = ci[4];
  co[1] = ci[5];
  co[2] = ci[6];
  co[3] = ci[7];
  co[4] = ci[0];
  co[5] = ci[1];
  co[6] = ci[2];
  co[7] = ci[3];
#endif
#endif
  return dVal;
}


int
AMF_DecodeBoolean(const char *data)
{
  return *data != 0;
}


char *
AMF_EncodeInt16(char *output, char *outend, short nVal)
{
  if (output+2 > outend)
    return NULL;


  output[1] = nVal & 0xff;
  output[0] = nVal >> 8;
  return output+2;
}


//AMF-encode a 3-byte integer; AMF uses big-endian byte order
char *
AMF_EncodeInt24(char *output, char *outend, int nVal)
{
  if (output+3 > outend)
    return NULL;
  //write the bytes in reverse (big-endian) order
  output[2] = nVal & 0xff;
  output[1] = nVal >> 8;
  output[0] = nVal >> 16;
  //return a pointer just past the encoded data
  return output+3;
}


char *
AMF_EncodeInt32(char *output, char *outend, int nVal)
{
  if (output+4 > outend)
    return NULL;


  output[3] = nVal & 0xff;
  output[2] = nVal >> 8;
  output[1] = nVal >> 16;
  output[0] = nVal >> 24;
  return output+4;
}


char *
AMF_EncodeString(char *output, char *outend, const AVal *bv)
{
  if ((bv->av_len < 65536 && output + 1 + 2 + bv->av_len > outend) ||
  output + 1 + 4 + bv->av_len > outend)
    return NULL;


  if (bv->av_len < 65536)
  {
      *output++ = AMF_STRING;


      output = AMF_EncodeInt16(output, outend, bv->av_len);
  }
  else
  {
      *output++ = AMF_LONG_STRING;


      output = AMF_EncodeInt32(output, outend, bv->av_len);
  }
  memcpy(output, bv->av_val, bv->av_len);
  output += bv->av_len;


  return output;
}


char *
AMF_EncodeNumber(char *output, char *outend, double dVal)
{
  if (output+1+8 > outend)
    return NULL;


  *output++ = AMF_NUMBER; /* type: Number */


#if __FLOAT_WORD_ORDER == __BYTE_ORDER
#if __BYTE_ORDER == __BIG_ENDIAN
  memcpy(output, &dVal, 8);
#elif __BYTE_ORDER == __LITTLE_ENDIAN
  {
    unsigned char *ci, *co;
    ci = (unsigned char *)&dVal;
    co = (unsigned char *)output;
    co[0] = ci[7];
    co[1] = ci[6];
    co[2] = ci[5];
    co[3] = ci[4];
    co[4] = ci[3];
    co[5] = ci[2];
    co[6] = ci[1];
    co[7] = ci[0];
  }
#endif
#else
#if __BYTE_ORDER == __LITTLE_ENDIAN /* __FLOAT_WORD_ORER == __BIG_ENDIAN */
  {
    unsigned char *ci, *co;
    ci = (unsigned char *)&dVal;
    co = (unsigned char *)output;
    co[0] = ci[3];
    co[1] = ci[2];
    co[2] = ci[1];
    co[3] = ci[0];
    co[4] = ci[7];
    co[5] = ci[6];
    co[6] = ci[5];
    co[7] = ci[4];
  }
#else /* __BYTE_ORDER == __BIG_ENDIAN && __FLOAT_WORD_ORER == __LITTLE_ENDIAN */
  {
    unsigned char *ci, *co;
    ci = (unsigned char *)&dVal;
    co = (unsigned char *)output;
    co[0] = ci[4];
    co[1] = ci[5];
    co[2] = ci[6];
    co[3] = ci[7];
    co[4] = ci[0];
    co[5] = ci[1];
    co[6] = ci[2];
    co[7] = ci[3];
  }
#endif
#endif


  return output+8;
}


char *
AMF_EncodeBoolean(char *output, char *outend, int bVal)
{
  if (output+2 > outend)
    return NULL;


  *output++ = AMF_BOOLEAN;


  *output++ = bVal ? 0x01 : 0x00;


  return output;
}


char *
AMF_EncodeNamedString(char *output, char *outend, const AVal *strName, const AVal *strValue)
{
  if (output+2+strName->av_len > outend)
    return NULL;
  output = AMF_EncodeInt16(output, outend, strName->av_len);


  memcpy(output, strName->av_val, strName->av_len);
  output += strName->av_len;


  return AMF_EncodeString(output, outend, strValue);
}


char *
AMF_EncodeNamedNumber(char *output, char *outend, const AVal *strName, double dVal)
{
  if (output+2+strName->av_len > outend)
    return NULL;
  output = AMF_EncodeInt16(output, outend, strName->av_len);


  memcpy(output, strName->av_val, strName->av_len);
  output += strName->av_len;


  return AMF_EncodeNumber(output, outend, dVal);
}


char *
AMF_EncodeNamedBoolean(char *output, char *outend, const AVal *strName, int bVal)
{
  if (output+2+strName->av_len > outend)
    return NULL;
  output = AMF_EncodeInt16(output, outend, strName->av_len);


  memcpy(output, strName->av_val, strName->av_len);
  output += strName->av_len;


  return AMF_EncodeBoolean(output, outend, bVal);
}


void
AMFProp_GetName(AMFObjectProperty *prop, AVal *name)
{
  *name = prop->p_name;
}


void
AMFProp_SetName(AMFObjectProperty *prop, AVal *name)
{
  prop->p_name = *name;
}


AMFDataType
AMFProp_GetType(AMFObjectProperty *prop)
{
  return prop->p_type;
}


double
AMFProp_GetNumber(AMFObjectProperty *prop)
{
  return prop->p_vu.p_number;
}


int
AMFProp_GetBoolean(AMFObjectProperty *prop)
{
  return prop->p_vu.p_number != 0;
}


void
AMFProp_GetString(AMFObjectProperty *prop, AVal *str)
{
  *str = prop->p_vu.p_aval;
}


void
AMFProp_GetObject(AMFObjectProperty *prop, AMFObject *obj)
{
  *obj = prop->p_vu.p_object;
}


int
AMFProp_IsValid(AMFObjectProperty *prop)
{
  return prop->p_type != AMF_INVALID;
}


char *
AMFProp_Encode(AMFObjectProperty *prop, char *pBuffer, char *pBufEnd)
{
  if (prop->p_type == AMF_INVALID)
    return NULL;


  if (prop->p_type != AMF_NULL && pBuffer + prop->p_name.av_len + 2 + 1 >= pBufEnd)
    return NULL;


  if (prop->p_type != AMF_NULL && prop->p_name.av_len)
    {
      *pBuffer++ = prop->p_name.av_len >> 8;
      *pBuffer++ = prop->p_name.av_len & 0xff;
      memcpy(pBuffer, prop->p_name.av_val, prop->p_name.av_len);
      pBuffer += prop->p_name.av_len;
    }


  switch (prop->p_type)
    {
    case AMF_NUMBER:
      pBuffer = AMF_EncodeNumber(pBuffer, pBufEnd, prop->p_vu.p_number);
      break;


    case AMF_BOOLEAN:
      pBuffer = AMF_EncodeBoolean(pBuffer, pBufEnd, prop->p_vu.p_number != 0);
      break;


    case AMF_STRING:
      pBuffer = AMF_EncodeString(pBuffer, pBufEnd, &prop->p_vu.p_aval);
      break;


    case AMF_NULL:
      if (pBuffer+1 >= pBufEnd)
        return NULL;
      *pBuffer++ = AMF_NULL;
      break;


    case AMF_OBJECT:
      pBuffer = AMF_Encode(&prop->p_vu.p_object, pBuffer, pBufEnd);
      break;


    default:
      RTMP_Log(RTMP_LOGERROR, "%s, invalid type. %d", __FUNCTION__, prop->p_type);
      pBuffer = NULL;
    };


  return pBuffer;
}


#define AMF3_INTEGER_MAX  268435455
#define AMF3_INTEGER_MIN  -268435456


int
AMF3ReadInteger(const char *data, int32_t *valp)
{
  int i = 0;
  int32_t val = 0;


  while (i <= 2)
  {   
    /* handle first 3 bytes */
    if (data[i] & 0x80)
    {     /* byte used */
      val <<= 7;    /* shift up */
      val |= (data[i] & 0x7f);  /* add bits */
      i++;
    }
    else
    {
      break;
    }
  }


  if (i > 2)
  {       
    /* use 4th byte, all 8bits */
    val <<= 8;
    val |= data[3];


    /* range check */
    if (val > AMF3_INTEGER_MAX)
    val -= (1 << 29);
  }
  else
  {       
    /* use 7bits of last unparsed byte (0xxxxxxx) */
    val <<= 7;
    val |= data[i];
  }


  *valp = val;


  return i > 2 ? 4 : i + 1;
}


int
AMF3ReadString(const char *data, AVal *str)
{
  int32_t ref = 0;
  int len;
  assert(str != 0);


  len = AMF3ReadInteger(data, &ref);
  data += len;


  if ((ref & 0x1) == 0)
  {       /* reference: 0xxx */
    uint32_t refIndex = (ref >> 1);
    RTMP_Log(RTMP_LOGDEBUG,
    "%s, string reference, index: %d, not supported, ignoring!",
    __FUNCTION__, refIndex);
      return len;
  }
  else
  {
    uint32_t nSize = (ref >> 1);


    str->av_val = (char *)data;
    str->av_len = nSize;


    return len + nSize;
  }
  return len;
}


int
AMF3Prop_Decode(AMFObjectProperty *prop, const char *pBuffer, int nSize,
    int bDecodeName)
{
  int nOriginalSize = nSize;
  AMF3DataType type;


  prop->p_name.av_len = 0;
  prop->p_name.av_val = NULL;


  if (nSize == 0 || !pBuffer)
  {
    RTMP_Log(RTMP_LOGDEBUG, "empty buffer/no buffer pointer!");
    return -1;
  }


  /* decode name */
  if (bDecodeName)
  {
      AVal name;
      int nRes = AMF3ReadString(pBuffer, &name);


      if (name.av_len <= 0)
        return nRes;


      prop->p_name = name;
      pBuffer += nRes;
      nSize -= nRes;
  }


  /* decode */
  type = (AMF3DataType) *pBuffer++;
  nSize--;


  switch (type)
  {
    case AMF3_UNDEFINED:
    case AMF3_NULL:
      prop->p_type = AMF_NULL;
      break;
    case AMF3_FALSE:
      prop->p_type = AMF_BOOLEAN;
      prop->p_vu.p_number = 0.0;
      break;
    case AMF3_TRUE:
      prop->p_type = AMF_BOOLEAN;
      prop->p_vu.p_number = 1.0;
      break;
    case AMF3_INTEGER:
    {
      int32_t res = 0;
  int len = AMF3ReadInteger(pBuffer, &res);
  prop->p_vu.p_number = (double)res;
  prop->p_type = AMF_NUMBER;
  nSize -= len;
  break;
      }
    case AMF3_DOUBLE:
      if (nSize < 8)
  return -1;
      prop->p_vu.p_number = AMF_DecodeNumber(pBuffer);
      prop->p_type = AMF_NUMBER;
      nSize -= 8;
      break;
    case AMF3_STRING:
    case AMF3_XML_DOC:
    case AMF3_XML:
      {
  int len = AMF3ReadString(pBuffer, &prop->p_vu.p_aval);
  prop->p_type = AMF_STRING;
  nSize -= len;
  break;
      }
    case AMF3_DATE:
      {
  int32_t res = 0;
  int len = AMF3ReadInteger(pBuffer, &res);


  nSize -= len;
  pBuffer += len;


  if ((res & 0x1) == 0)
    {     /* reference */
      uint32_t nIndex = (res >> 1);
      RTMP_Log(RTMP_LOGDEBUG, "AMF3_DATE reference: %d, not supported!", nIndex);
    }
  else
    {
      if (nSize < 8)
        return -1;


      prop->p_vu.p_number = AMF_DecodeNumber(pBuffer);
      nSize -= 8;
      prop->p_type = AMF_NUMBER;
    }
  break;
      }
    case AMF3_OBJECT:
      {
  int nRes = AMF3_Decode(&prop->p_vu.p_object, pBuffer, nSize, TRUE);
  if (nRes == -1)
    return -1;
  nSize -= nRes;
  prop->p_type = AMF_OBJECT;
  break;
      }
    case AMF3_ARRAY:
    case AMF3_BYTE_ARRAY:
    default:
      RTMP_Log(RTMP_LOGDEBUG, "%s - AMF3 unknown/unsupported datatype 0x%02x, @0x%08X",
    __FUNCTION__, (unsigned char)(*pBuffer), pBuffer);
      return -1;
    }


  return nOriginalSize - nSize;
}
//Decode an AMF property according to its data type
int
AMFProp_Decode(AMFObjectProperty *prop, const char *pBuffer, int nSize,
         int bDecodeName)
{
  int nOriginalSize = nSize;
  int nRes;


  prop->p_name.av_len = 0;
  prop->p_name.av_val = NULL;


  if (nSize == 0 || !pBuffer)
    {
      RTMP_Log(RTMP_LOGDEBUG, "%s: Empty buffer/no buffer pointer!", __FUNCTION__);
      return -1;
    }


  if (bDecodeName && nSize < 4)
    {       /* at least name (length + at least 1 byte) and 1 byte of data */
      RTMP_Log(RTMP_LOGDEBUG,
    "%s: Not enough data for decoding with name, less than 4 bytes!",
    __FUNCTION__);
      return -1;
    }


  if (bDecodeName)
    {
      unsigned short nNameSize = AMF_DecodeInt16(pBuffer);
      if (nNameSize > nSize - 2)
  {
    RTMP_Log(RTMP_LOGDEBUG,
        "%s: Name size out of range: namesize (%d) > len (%d) - 2",
        __FUNCTION__, nNameSize, nSize);
    return -1;
  }


      AMF_DecodeString(pBuffer, &prop->p_name);
      nSize -= 2 + nNameSize;
      pBuffer += 2 + nNameSize;
    }


  if (nSize == 0)
    {
      return -1;
    }


  nSize--;


  prop->p_type = (AMFDataType) *pBuffer++;
  switch (prop->p_type)
    {
  //the Number type
    case AMF_NUMBER:
      if (nSize < 8)
  return -1;
      prop->p_vu.p_number = AMF_DecodeNumber(pBuffer);
      nSize -= 8;
      break;
   //the Boolean type
    case AMF_BOOLEAN:
      if (nSize < 1)
  return -1;
      prop->p_vu.p_number = (double)AMF_DecodeBoolean(pBuffer);
      nSize--;
      break;
    //the String type
    case AMF_STRING:
      {
  unsigned short nStringSize = AMF_DecodeInt16(pBuffer);


  if (nSize < (long)nStringSize + 2)
    return -1;
  AMF_DecodeString(pBuffer, &prop->p_vu.p_aval);
  nSize -= (2 + nStringSize);
  break;
      }
    //the Object type
    case AMF_OBJECT:
      {
  int nRes = AMF_Decode(&prop->p_vu.p_object, pBuffer, nSize, TRUE);
  if (nRes == -1)
    return -1;
  nSize -= nRes;
  break;
      }
    case AMF_MOVIECLIP:
      {
  RTMP_Log(RTMP_LOGERROR, "AMF_MOVIECLIP reserved!");
  return -1;
  break;
      }
    case AMF_NULL:
    case AMF_UNDEFINED:
    case AMF_UNSUPPORTED:
      prop->p_type = AMF_NULL;
      break;
    case AMF_REFERENCE:
      {
  RTMP_Log(RTMP_LOGERROR, "AMF_REFERENCE not supported!");
  return -1;
  break;
      }
    case AMF_ECMA_ARRAY:
      {
  nSize -= 4;


  /* next comes the rest, mixed array has a final 0x000009 mark and names, so its an object */
  nRes = AMF_Decode(&prop->p_vu.p_object, pBuffer + 4, nSize, TRUE);
  if (nRes == -1)
    return -1;
  nSize -= nRes;
  prop->p_type = AMF_OBJECT;
  break;
      }
    case AMF_OBJECT_END:
      {
  return -1;
  break;
      }
    case AMF_STRICT_ARRAY:
      {
  unsigned int nArrayLen = AMF_DecodeInt32(pBuffer);
  nSize -= 4;


  nRes = AMF_DecodeArray(&prop->p_vu.p_object, pBuffer + 4, nSize,
           nArrayLen, FALSE);
  if (nRes == -1)
    return -1;
  nSize -= nRes;
  prop->p_type = AMF_OBJECT;
  break;
      }
    case AMF_DATE:
      {
  RTMP_Log(RTMP_LOGDEBUG, "AMF_DATE");


  if (nSize < 10)
    return -1;


  prop->p_vu.p_number = AMF_DecodeNumber(pBuffer);
  prop->p_UTCoffset = AMF_DecodeInt16(pBuffer + 8);


  nSize -= 10;
  break;
      }
    case AMF_LONG_STRING:
      {
  unsigned int nStringSize = AMF_DecodeInt32(pBuffer);
  if (nSize < (long)nStringSize + 4)
    return -1;
  AMF_DecodeLongString(pBuffer, &prop->p_vu.p_aval);
  nSize -= (4 + nStringSize);
  prop->p_type = AMF_STRING;
  break;
      }
    case AMF_RECORDSET:
      {
  RTMP_Log(RTMP_LOGERROR, "AMF_RECORDSET reserved!");
  return -1;
  break;
      }
    case AMF_XML_DOC:
      {
  RTMP_Log(RTMP_LOGERROR, "AMF_XML_DOC not supported!");
  return -1;
  break;
      }
    case AMF_TYPED_OBJECT:
      {
  RTMP_Log(RTMP_LOGERROR, "AMF_TYPED_OBJECT not supported!");
  return -1;
  break;
      }
    case AMF_AVMPLUS:
      {
  int nRes = AMF3_Decode(&prop->p_vu.p_object, pBuffer, nSize, TRUE);
  if (nRes == -1)
    return -1;
  nSize -= nRes;
  prop->p_type = AMF_OBJECT;
  break;
      }
    default:
      RTMP_Log(RTMP_LOGDEBUG, "%s - unknown datatype 0x%02x, @0x%08X", __FUNCTION__,
    prop->p_type, pBuffer - 1);
      return -1;
    }


  return nOriginalSize - nSize;
}


void
AMFProp_Dump(AMFObjectProperty *prop)
{
  char strRes[256];
  char str[256];
  AVal name;


  if (prop->p_type == AMF_INVALID)
    {
      RTMP_Log(RTMP_LOGDEBUG, "Property: INVALID");
      return;
    }


  if (prop->p_type == AMF_NULL)
    {
      RTMP_Log(RTMP_LOGDEBUG, "Property: NULL");
      return;
    }


  if (prop->p_name.av_len)
    {
      name = prop->p_name;
    }
  else
    {
      name.av_val = "no-name.";
      name.av_len = sizeof("no-name.") - 1;
    }
  if (name.av_len > 18)
    name.av_len = 18;


  snprintf(strRes, 255, "Name: %18.*s, ", name.av_len, name.av_val);


  if (prop->p_type == AMF_OBJECT)
    {
      RTMP_Log(RTMP_LOGDEBUG, "Property: <%sOBJECT>", strRes);
      AMF_Dump(&prop->p_vu.p_object);
      return;
    }


  switch (prop->p_type)
    {
    case AMF_NUMBER:
      snprintf(str, 255, "NUMBER:\t%.2f", prop->p_vu.p_number);
      break;
    case AMF_BOOLEAN:
      snprintf(str, 255, "BOOLEAN:\t%s",
         prop->p_vu.p_number != 0.0 ? "TRUE" : "FALSE");
      break;
    case AMF_STRING:
      snprintf(str, 255, "STRING:\t%.*s", prop->p_vu.p_aval.av_len,
         prop->p_vu.p_aval.av_val);
      break;
    case AMF_DATE:
      snprintf(str, 255, "DATE:\ttimestamp: %.2f, UTC offset: %d",
         prop->p_vu.p_number, prop->p_UTCoffset);
      break;
    default:
      snprintf(str, 255, "INVALID TYPE 0x%02x", (unsigned char)prop->p_type);
    }


  RTMP_Log(RTMP_LOGDEBUG, "Property: <%s%s>", strRes, str);
}


void
AMFProp_Reset(AMFObjectProperty *prop)
{
  if (prop->p_type == AMF_OBJECT)
    AMF_Reset(&prop->p_vu.p_object);
  else
    {
      prop->p_vu.p_aval.av_len = 0;
      prop->p_vu.p_aval.av_val = NULL;
    }
  prop->p_type = AMF_INVALID;
}


/* AMFObject */


char *
AMF_Encode(AMFObject *obj, char *pBuffer, char *pBufEnd)
{
  int i;


  if (pBuffer+4 >= pBufEnd)
    return NULL;


  *pBuffer++ = AMF_OBJECT;


  for (i = 0; i < obj->o_num; i++)
    {
      char *res = AMFProp_Encode(&obj->o_props[i], pBuffer, pBufEnd);
      if (res == NULL)
  {
    RTMP_Log(RTMP_LOGERROR, "AMF_Encode - failed to encode property in index %d",
        i);
    break;
  }
      else
  {
    pBuffer = res;
  }
    }


  if (pBuffer + 3 >= pBufEnd)
    return NULL;      /* no room for the end marker */


  pBuffer = AMF_EncodeInt24(pBuffer, pBufEnd, AMF_OBJECT_END);


  return pBuffer;
}


int
AMF_DecodeArray(AMFObject *obj, const char *pBuffer, int nSize,
    int nArrayLen, int bDecodeName)
{
  int nOriginalSize = nSize;
  int bError = FALSE;


  obj->o_num = 0;
  obj->o_props = NULL;
  while (nArrayLen > 0)
    {
      AMFObjectProperty prop;
      int nRes;
      nArrayLen--;


      nRes = AMFProp_Decode(&prop, pBuffer, nSize, bDecodeName);
      if (nRes == -1)
  bError = TRUE;
      else
  {
    nSize -= nRes;
    pBuffer += nRes;
    AMF_AddProp(obj, &prop);
  }
    }
  if (bError)
    return -1;


  return nOriginalSize - nSize;
}


int
AMF3_Decode(AMFObject *obj, const char *pBuffer, int nSize, int bAMFData)
{
  int nOriginalSize = nSize;
  int32_t ref;
  int len;


  obj->o_num = 0;
  obj->o_props = NULL;
  if (bAMFData)
    {
      if (*pBuffer != AMF3_OBJECT)
  RTMP_Log(RTMP_LOGERROR,
      "AMF3 Object encapsulated in AMF stream does not start with AMF3_OBJECT!");
      pBuffer++;
      nSize--;
    }


  ref = 0;
  len = AMF3ReadInteger(pBuffer, &ref);
  pBuffer += len;
  nSize -= len;


  if ((ref & 1) == 0)
    {       /* object reference, 0xxx */
      uint32_t objectIndex = (ref >> 1);


      RTMP_Log(RTMP_LOGDEBUG, "Object reference, index: %d", objectIndex);
    }
  else        /* object instance */
    {
      int32_t classRef = (ref >> 1);


      AMF3ClassDef cd = { {0, 0}
      };
      AMFObjectProperty prop;


      if ((classRef & 0x1) == 0)
  {     /* class reference */
    uint32_t classIndex = (classRef >> 1);
    RTMP_Log(RTMP_LOGDEBUG, "Class reference: %d", classIndex);
  }
      else
  {
    int32_t classExtRef = (classRef >> 1);
    int i;


    cd.cd_externalizable = (classExtRef & 0x1) == 1;
    cd.cd_dynamic = ((classExtRef >> 1) & 0x1) == 1;


    cd.cd_num = classExtRef >> 2;


    /* class name */


    len = AMF3ReadString(pBuffer, &cd.cd_name);
    nSize -= len;
    pBuffer += len;


    /*std::string str = className; */


    RTMP_Log(RTMP_LOGDEBUG,
        "Class name: %s, externalizable: %d, dynamic: %d, classMembers: %d",
        cd.cd_name.av_val, cd.cd_externalizable, cd.cd_dynamic,
        cd.cd_num);


    for (i = 0; i < cd.cd_num; i++)
      {
        AVal memberName;
        len = AMF3ReadString(pBuffer, &memberName);
        RTMP_Log(RTMP_LOGDEBUG, "Member: %s", memberName.av_val);
        AMF3CD_AddProp(&cd, &memberName);
        nSize -= len;
        pBuffer += len;
      }
  }


      /* add as referencable object */


      if (cd.cd_externalizable)
  {
    int nRes;
    AVal name = AVC("DEFAULT_ATTRIBUTE");


    RTMP_Log(RTMP_LOGDEBUG, "Externalizable, TODO check");


    nRes = AMF3Prop_Decode(&prop, pBuffer, nSize, FALSE);
    if (nRes == -1)
      RTMP_Log(RTMP_LOGDEBUG, "%s, failed to decode AMF3 property!",
    __FUNCTION__);
    else
      {
        nSize -= nRes;
        pBuffer += nRes;
      }


    AMFProp_SetName(&prop, &name);
    AMF_AddProp(obj, &prop);
  }
      else
  {
    int nRes, i;
    for (i = 0; i < cd.cd_num; i++) /* non-dynamic */
      {
        nRes = AMF3Prop_Decode(&prop, pBuffer, nSize, FALSE);
        if (nRes == -1)
    RTMP_Log(RTMP_LOGDEBUG, "%s, failed to decode AMF3 property!",
        __FUNCTION__);


        AMFProp_SetName(&prop, AMF3CD_GetProp(&cd, i));
        AMF_AddProp(obj, &prop);


        pBuffer += nRes;
        nSize -= nRes;
      }
    if (cd.cd_dynamic)
      {
        int len = 0;


        do
    {
      nRes = AMF3Prop_Decode(&prop, pBuffer, nSize, TRUE);
      AMF_AddProp(obj, &prop);


      pBuffer += nRes;
      nSize -= nRes;


      len = prop.p_name.av_len;
    }
        while (len > 0);
      }
  }
      RTMP_Log(RTMP_LOGDEBUG, "class object!");
    }
  return nOriginalSize - nSize;
}
//Decode an AMF-encoded Object
int
AMF_Decode(AMFObject *obj, const char *pBuffer, int nSize, int bDecodeName)
{
  int nOriginalSize = nSize;
  int bError = FALSE;   /* if there is an error while decoding - try to at least find the end mark AMF_OBJECT_END */


  obj->o_num = 0;
  obj->o_props = NULL;
  while (nSize > 0)
    {
      AMFObjectProperty prop;
      int nRes;


      if (nSize >=3 && AMF_DecodeInt24(pBuffer) == AMF_OBJECT_END)
  {
    nSize -= 3;
    bError = FALSE;
    break;
  }


      if (bError)
  {
    RTMP_Log(RTMP_LOGERROR,
        "DECODING ERROR, IGNORING BYTES UNTIL NEXT KNOWN PATTERN!");
    nSize--;
    pBuffer++;
    continue;
  }
    //decode a property inside the Object
      nRes = AMFProp_Decode(&prop, pBuffer, nSize, bDecodeName);
      if (nRes == -1)
  bError = TRUE;
      else
  {
    nSize -= nRes;
    pBuffer += nRes;
    AMF_AddProp(obj, &prop);
  }
    }


  if (bError)
    return -1;


  return nOriginalSize - nSize;
}


void
AMF_AddProp(AMFObject *obj, const AMFObjectProperty *prop)
{
  if (!(obj->o_num & 0x0f))
    obj->o_props = (AMFObjectProperty *)
      realloc(obj->o_props, (obj->o_num + 16) * sizeof(AMFObjectProperty));
  obj->o_props[obj->o_num++] = *prop;
}


int
AMF_CountProp(AMFObject *obj)
{
  return obj->o_num;
}


AMFObjectProperty *
AMF_GetProp(AMFObject *obj, const AVal *name, int nIndex)
{
  if (nIndex >= 0)
    {
      if (nIndex <= obj->o_num)
  return &obj->o_props[nIndex];
    }
  else
    {
      int n;
      for (n = 0; n < obj->o_num; n++)
  {
    if (AVMATCH(&obj->o_props[n].p_name, name))
      return &obj->o_props[n];
  }
    }


  return (AMFObjectProperty *)&AMFProp_Invalid;
}


void
AMF_Dump(AMFObject *obj)
{
  int n;
  RTMP_Log(RTMP_LOGDEBUG, "(object begin)");
  for (n = 0; n < obj->o_num; n++)
    {
      AMFProp_Dump(&obj->o_props[n]);
    }
  RTMP_Log(RTMP_LOGDEBUG, "(object end)");
}


void
AMF_Reset(AMFObject *obj)
{
  int n;
  for (n = 0; n < obj->o_num; n++)
    {
      AMFProp_Reset(&obj->o_props[n]);
    }
  free(obj->o_props);
  obj->o_props = NULL;
  obj->o_num = 0;
}




/* AMF3ClassDefinition */


void
AMF3CD_AddProp(AMF3ClassDef *cd, AVal *prop)
{
  if (!(cd->cd_num & 0x0f))
    cd->cd_props = (AVal *)realloc(cd->cd_props, (cd->cd_num + 16) * sizeof(AVal));
  cd->cd_props[cd->cd_num++] = *prop;
}


AVal *
AMF3CD_GetProp(AMF3ClassDef *cd, int nIndex)
{
  if (nIndex >= cd->cd_num)
    return (AVal *)&AV_empty;
  return &cd->cd_props[nIndex];
}
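
As a quick sanity check of the encoders and decoders above, the following hypothetical snippet round-trips a Number and a String through AMF_EncodeNumber()/AMF_DecodeNumber() and AMF_EncodeString()/AMF_DecodeString(). The buffer size, the asserts and the sample values are my own choices; only the AMF_* functions and the AVC initializer macro (both used in the code above) come from libRTMP.

#include <assert.h>
#include <stdio.h>
#include "amf.h"

int main(void)
{
  char buf[64];
  char *end = buf + sizeof(buf);
  char *p, *q;
  AVal s = AVC("shanggua");
  AVal decoded;

  /* Number: 0x00 type marker + 8-byte big-endian double */
  p = AMF_EncodeNumber(buf, end, 4.0);
  assert(p && p - buf == 9);
  assert(AMF_DecodeNumber(buf + 1) == 4.0);   /* skip the type byte when decoding */

  /* String: 0x02 type marker + 2-byte length + UTF-8 bytes */
  q = AMF_EncodeString(p, end, &s);
  assert(q && q - p == 1 + 2 + s.av_len);

  AMF_DecodeString(p + 1, &decoded);          /* again skip the type byte */
  printf("decoded: %.*s\n", decoded.av_len, decoded.av_val);
  return 0;
}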


This section analyzes the first step RTMPdump (libRTMP) performs when connecting to an RTMP server: the handshake.


The RTMP connection process has been analyzed before: RTMP streaming media playback process


Rather than repeating that, let us analyze the function in handshake.h that implements the handshake.


Note: handshake.h contains a lot of code, but much of it exists to handle the encrypted variants of RTMP, such as rtmps;
that part is not analyzed here, and we only consider plain RTMP.
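
Before diving into the real function, here is a minimal sketch of what the exchange boils down to for plain RTMP (no FP9 digest, no encryption). send_all(), recv_all() and fill_random() are hypothetical helpers standing in for libRTMP's WriteN()/ReadN() and the rand() loop; RTMP_SIG_SIZE is 1536, as in the real code below.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>   /* htonl(); on Windows use winsock2.h instead */

#define RTMP_SIG_SIZE 1536

/* Plain RTMP handshake, heavily simplified; all error handling omitted. */
int plain_handshake(int sock)
{
  uint8_t c0c1[1 + RTMP_SIG_SIZE];              /* C0 (1 byte) + C1 (1536 bytes) */
  uint8_t s0, s1[RTMP_SIG_SIZE], s2[RTMP_SIG_SIZE];
  uint32_t uptime = htonl(get_uptime_ms());     /* hypothetical clock helper */

  c0c1[0] = 0x03;                               /* C0: requested RTMP version */
  memcpy(&c0c1[1], &uptime, 4);                 /* C1: 4-byte timestamp */
  memset(&c0c1[5], 0, 4);                       /*     4 zero bytes */
  fill_random(&c0c1[9], RTMP_SIG_SIZE - 8);     /*     1528 (pseudo)random bytes */

  send_all(sock, c0c1, sizeof(c0c1));           /* -> C0 + C1 in one write */
  recv_all(sock, &s0, 1);                       /* <- S0: server's RTMP version */
  recv_all(sock, s1, RTMP_SIG_SIZE);            /* <- S1 */
  send_all(sock, s1, RTMP_SIG_SIZE);            /* -> C2: echo S1 back */
  recv_all(sock, s2, RTMP_SIG_SIZE);            /* <- S2: should echo our C1 */

  return s0 == 0x03 && memcmp(s2, &c0c1[1], RTMP_SIG_SIZE) == 0;
}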


static int
HandShake(RTMP * r, int FP9HandShake)
{
  int i, offalg = 0;
  int dhposClient = 0;
  int digestPosClient = 0;
  int encrypted = r->Link.protocol & RTMP_FEATURE_ENC;


  RC4_handle keyIn = 0;
  RC4_handle keyOut = 0;


  int32_t *ip;
  uint32_t uptime;


  uint8_t clientbuf[RTMP_SIG_SIZE + 4], *clientsig=clientbuf+4;
  uint8_t serversig[RTMP_SIG_SIZE], client2[RTMP_SIG_SIZE], *reply;
  uint8_t type;
  getoff *getdh = NULL, *getdig = NULL;


  if (encrypted || r->Link.SWFSize)
    FP9HandShake = TRUE;
  else
  //the plain, unencrypted handshake
    FP9HandShake = FALSE;


  r->Link.rc4keyIn = r->Link.rc4keyOut = 0;


  if (encrypted)
  {
      clientsig[-1] = 0x06; /* 0x08 is RTMPE as well */
      offalg = 1;
  }
  else
    //0x03 is the RTMP protocol version the client requests
    //note the "-1" index: clientsig points 4 bytes into clientbuf, so clientsig[-1] is valid
    //this is the C0 field (1 byte)
    clientsig[-1] = 0x03;


  uptime = htonl(RTMP_GetTime());
  //void *memcpy(void *dest, const void *src, int n);
  //copies n consecutive bytes starting at src into the memory starting at dest
  //copy the 4 bytes of uptime (it is exactly 4 bytes long) to the address clientsig points to
  //this is a C1 field (4 bytes)
  memcpy(clientsig, &uptime, 4);


  if (FP9HandShake)
  {
      /* set version to at least 9.0.115.0 */
      if (encrypted)
      {
        clientsig[4] = 128;
        clientsig[6] = 3;
      }
      else
      {
        clientsig[4] = 10;
        clientsig[6] = 45;
      }
      clientsig[5] = 0;
      clientsig[7] = 2;


      RTMP_Log(RTMP_LOGDEBUG, "%s: Client type: %02X", __FUNCTION__, clientsig[-1]);
      getdig = digoff[offalg];
      getdh  = dhoff[offalg];
  }
  else
  {
    //void *memset(void *s, int ch, size_t n); sets the first n bytes of s to ch and returns s
    //set the 4 bytes starting at clientsig[4] to 0
    //this is a C1 field
    memset(&clientsig[4], 0, 4);
  }


  /* generate random data */
#ifdef _DEBUG
  //set the 1528 bytes starting at clientsig+8 to 0 (a simple shortcut for debug builds)
  //this is the random field of C1
  memset(clientsig+8, 0, RTMP_SIG_SIZE-8);
#else
  //in release builds, 1528 bytes of pseudo-random data are generated with rand() in a loop
  ip = (int32_t *)(clientsig+8);
  for (i = 2; i < RTMP_SIG_SIZE/4; i++)
    *ip++ = rand();
#endif


  /* set handshake digest */
  if (FP9HandShake)
  {
      if (encrypted)
      {
        /* generate Diffie-Hellmann parameters */
        r->Link.dh = DHInit(1024);
        if (!r->Link.dh)
        {
          RTMP_Log(RTMP_LOGERROR, "%s: Couldn't initialize Diffie-Hellmann!",__FUNCTION__);
          return FALSE;
        }


        dhposClient = getdh(clientsig, RTMP_SIG_SIZE);
        RTMP_Log(RTMP_LOGDEBUG, "%s: DH pubkey position: %d", __FUNCTION__, dhposClient);


        if (!DHGenerateKey((DH *)r->Link.dh))
        {
          RTMP_Log(RTMP_LOGERROR, "%s: Couldn't generate Diffie-Hellmann public key!",
            __FUNCTION__);
          return FALSE;
        }


        if (!DHGetPublicKey((DH *)r->Link.dh, &clientsig[dhposClient], 128))
        {
          RTMP_Log(RTMP_LOGERROR, "%s: Couldn't write public key!", __FUNCTION__);
          return FALSE;
        }
      }


      digestPosClient = getdig(clientsig, RTMP_SIG_SIZE); /* reuse this value in verification */
      RTMP_Log(RTMP_LOGDEBUG, "%s: Client digest offset: %d", __FUNCTION__,digestPosClient);


      CalculateDigest(digestPosClient, clientsig, GenuineFPKey, 30,&clientsig[digestPosClient]);


      RTMP_Log(RTMP_LOGDEBUG, "%s: Initial client digest: ", __FUNCTION__);
      RTMP_LogHex(RTMP_LOGDEBUG, clientsig + digestPosClient,SHA256_DIGEST_LENGTH);
    }


#ifdef _DEBUG
  RTMP_Log(RTMP_LOGDEBUG, "Clientsig: ");
  RTMP_LogHex(RTMP_LOGDEBUG, clientsig, RTMP_SIG_SIZE);
#endif
  //Send C0+C1
  //starting at clientsig-1, length 1536+1: both parts go out in a single write
  //握手----------------
  r->dlg->AppendCInfo("建立连接:第1次连接。发送握手数据C0+C1");
  //-----------------------------
  if (!WriteN(r, (char *)clientsig-1, RTMP_SIG_SIZE + 1))
    return FALSE;
  //read 1 byte into type
  //this is the server's S0, i.e. the RTMP version the server will use
  if (ReadN(r, (char *)&type, 1) != 1)  /* 0x03 or 0x06 */
    return FALSE;
  //握手----------------
  r->dlg->AppendCInfo("建立连接:第1次连接。接收握手数据S0");
  //-----------------------------
  RTMP_Log(RTMP_LOGDEBUG, "%s: Type Answer   : %02X", __FUNCTION__, type);
  //the version requested by the client differs from the one the server offers
  if (type != clientsig[-1])
    RTMP_Log(RTMP_LOGWARNING, "%s: Type mismatch: client sent %d, server answered %d",
  __FUNCTION__, clientsig[-1], type);
  //握手----------------
  r->dlg->AppendCInfo("建立连接:第1次连接。成功接收握手数据S0,服务器和客户端版本相同");
  //-----------------------------
  //the client and server random blocks have the same length (1536 bytes)
  //握手----------------
  r->dlg->AppendCInfo("建立连接:第1次连接。接收握手数据S1");
  //-----------------------------
  if (ReadN(r, (char *)serversig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE)
    return FALSE;


  /* decode server response */
  //copy the first four bytes of serversig into uptime
  memcpy(&uptime, serversig, 4);
  //convert from big-endian (network) to host byte order
  uptime = ntohl(uptime);


  RTMP_Log(RTMP_LOGDEBUG, "%s: Server Uptime : %d", __FUNCTION__, uptime);
  RTMP_Log(RTMP_LOGDEBUG, "%s: FMS Version   : %d.%d.%d.%d", __FUNCTION__, serversig[4],
      serversig[5], serversig[6], serversig[7]);


  if (FP9HandShake && type == 3 && !serversig[4])
    FP9HandShake = FALSE;


#ifdef _DEBUG
  RTMP_Log(RTMP_LOGDEBUG, "Server signature:");
  RTMP_LogHex(RTMP_LOGDEBUG, serversig, RTMP_SIG_SIZE);
#endif


  if (FP9HandShake)
  {
      uint8_t digestResp[SHA256_DIGEST_LENGTH];
      uint8_t *signatureResp = NULL;


      /* we have to use this signature now to find the correct algorithms for getting the digest and DH positions */
      int digestPosServer = getdig(serversig, RTMP_SIG_SIZE);


      if (!VerifyDigest(digestPosServer, serversig, GenuineFMSKey, 36))
      {
        RTMP_Log(RTMP_LOGWARNING, "Trying different position for server digest!");
        offalg ^= 1;
        getdig = digoff[offalg];
        getdh  = dhoff[offalg];
        digestPosServer = getdig(serversig, RTMP_SIG_SIZE);


        if (!VerifyDigest(digestPosServer, serversig, GenuineFMSKey, 36))
        {
          RTMP_Log(RTMP_LOGERROR, "Couldn't verify the server digest"); /* continuing anyway will probably fail */
          return FALSE;
        }
      }


      /* generate SWFVerification token (SHA256 HMAC hash of decompressed SWF, key are the last 32 bytes of the server handshake) */
      if (r->Link.SWFSize)
      {
        const char swfVerify[] = { 0x01, 0x01 };
        char *vend = r->Link.SWFVerificationResponse+sizeof(r->Link.SWFVerificationResponse);


        memcpy(r->Link.SWFVerificationResponse, swfVerify, 2);
        AMF_EncodeInt32(&r->Link.SWFVerificationResponse[2], vend, r->Link.SWFSize);
        AMF_EncodeInt32(&r->Link.SWFVerificationResponse[6], vend, r->Link.SWFSize);
        HMACsha256(r->Link.SWFHash, SHA256_DIGEST_LENGTH,
                   &serversig[RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH],
                   SHA256_DIGEST_LENGTH,
                   (uint8_t *)&r->Link.SWFVerificationResponse[10]);
      }


      /* do Diffie-Hellmann Key exchange for encrypted RTMP */
      if (encrypted)
      {
        /* compute secret key */
        uint8_t secretKey[128] = { 0 };
        int len, dhposServer;


        dhposServer = getdh(serversig, RTMP_SIG_SIZE);
        RTMP_Log(RTMP_LOGDEBUG, "%s: Server DH public key offset: %d", __FUNCTION__,
          dhposServer);
        len = DHComputeSharedSecretKey((DH *)r->Link.dh, &serversig[dhposServer],
            128, secretKey);
        if (len < 0)
        {
            RTMP_Log(RTMP_LOGDEBUG, "%s: Wrong secret key position!", __FUNCTION__);
            return FALSE;
        }


        RTMP_Log(RTMP_LOGDEBUG, "%s: Secret key: ", __FUNCTION__);
        RTMP_LogHex(RTMP_LOGDEBUG, secretKey, 128);


        InitRC4Encryption(secretKey,
          (uint8_t *) & serversig[dhposServer],
          (uint8_t *) & clientsig[dhposClient],
          &keyIn, &keyOut);
      }




      reply = client2;
#ifdef _DEBUG
      memset(reply, 0xff, RTMP_SIG_SIZE);
#else
      ip = (int32_t *)reply;
      for (i = 0; i < RTMP_SIG_SIZE/4; i++)
        *ip++ = rand();
#endif
      /* calculate response now */
      signatureResp = reply+RTMP_SIG_SIZE-SHA256_DIGEST_LENGTH;


      HMACsha256(&serversig[digestPosServer], SHA256_DIGEST_LENGTH,
     GenuineFPKey, sizeof(GenuineFPKey), digestResp);
      HMACsha256(reply, RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH, digestResp,
     SHA256_DIGEST_LENGTH, signatureResp);


      /* some info output */
      RTMP_Log(RTMP_LOGDEBUG,
    "%s: Calculated digest key from secure key and server digest: ",
    __FUNCTION__);
      RTMP_LogHex(RTMP_LOGDEBUG, digestResp, SHA256_DIGEST_LENGTH);


#ifdef FP10
      if (type == 8 )
        {
    uint8_t *dptr = digestResp;
    uint8_t *sig = signatureResp;
    /* encrypt signatureResp */
          for (i=0; i<SHA256_DIGEST_LENGTH; i+=8)
      rtmpe8_sig(sig+i, sig+i, dptr[i] % 15);
        }
#if 0
      else if (type == 9)
        {
    uint8_t *dptr = digestResp;
    uint8_t *sig = signatureResp;
    /* encrypt signatureResp */
          for (i=0; i<SHA256_DIGEST_LENGTH; i+=8)
            rtmpe9_sig(sig+i, sig+i, dptr[i] % 15);
        }
#endif
#endif
      RTMP_Log(RTMP_LOGDEBUG, "%s: Client signature calculated:", __FUNCTION__);
      RTMP_LogHex(RTMP_LOGDEBUG, signatureResp, SHA256_DIGEST_LENGTH);
    }
  else
    {
  //no FP9 handshake: simply echo the server's signature back as the reply
      reply = serversig;
#if 0
      uptime = htonl(RTMP_GetTime());
      memcpy(reply+4, &uptime, 4);
#endif
    }


#ifdef _DEBUG
  RTMP_Log(RTMP_LOGDEBUG, "%s: Sending handshake response: ",
    __FUNCTION__);
  RTMP_LogHex(RTMP_LOGDEBUG, reply, RTMP_SIG_SIZE);
#endif
  //send the 1536 bytes in reply
  //this corresponds to C2
  //握手----------------
  r->dlg->AppendCInfo("建立连接:第1次连接。发送握手数据C2");
  //-----------------------------
  if (!WriteN(r, (char *)reply, RTMP_SIG_SIZE))
    return FALSE;


  /* 2nd part of handshake */
  //read 1536 bytes into serversig
  //握手----------------
  r->dlg->AppendCInfo("建立连接:第1次连接。读取握手数据S2");
  //-----------------------------
  if (ReadN(r, (char *)serversig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE)
    return FALSE;


#ifdef _DEBUG
  RTMP_Log(RTMP_LOGDEBUG, "%s: 2nd handshake: ", __FUNCTION__);
  RTMP_LogHex(RTMP_LOGDEBUG, serversig, RTMP_SIG_SIZE);
#endif


  if (FP9HandShake)
  {
      uint8_t signature[SHA256_DIGEST_LENGTH];
      uint8_t digest[SHA256_DIGEST_LENGTH];


      if (serversig[4] == 0 && serversig[5] == 0 && serversig[6] == 0 && serversig[7] == 0)
      {
        RTMP_Log(RTMP_LOGDEBUG,
        "%s: Wait, did the server just refuse signed authentication?",
        __FUNCTION__);
      }
      RTMP_Log(RTMP_LOGDEBUG, "%s: Server sent signature:", __FUNCTION__);
      RTMP_LogHex(RTMP_LOGDEBUG, &serversig[RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH],
       SHA256_DIGEST_LENGTH);


      /* verify server response */
      HMACsha256(&clientsig[digestPosClient], SHA256_DIGEST_LENGTH,
         GenuineFMSKey, sizeof(GenuineFMSKey), digest);
          HMACsha256(serversig, RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH, digest,
         SHA256_DIGEST_LENGTH, signature);


      /* show some information */
      RTMP_Log(RTMP_LOGDEBUG, "%s: Digest key: ", __FUNCTION__);
      RTMP_LogHex(RTMP_LOGDEBUG, digest, SHA256_DIGEST_LENGTH);


#ifdef FP10
      if (type == 8 )
      {
        uint8_t *dptr = digest;
        uint8_t *sig = signature;
        /* encrypt signature */
        for (i=0; i<SHA256_DIGEST_LENGTH; i+=8)
          rtmpe8_sig(sig+i, sig+i, dptr[i] % 15);
      }
#if 0
      else if (type == 9)
      {
        uint8_t *dptr = digest;
        uint8_t *sig = signature;
        /* encrypt signatureResp */
          for (i=0; i<SHA256_DIGEST_LENGTH; i+=8)
            rtmpe9_sig(sig+i, sig+i, dptr[i] % 15);
      }
#endif
#endif
      RTMP_Log(RTMP_LOGDEBUG, "%s: Signature calculated:", __FUNCTION__);
      RTMP_LogHex(RTMP_LOGDEBUG, signature, SHA256_DIGEST_LENGTH);
      if (memcmp(signature, &serversig[RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH],
         SHA256_DIGEST_LENGTH) != 0)
      {
        RTMP_Log(RTMP_LOGWARNING, "%s: Server not genuine Adobe!", __FUNCTION__);
        return FALSE;
      }
      else
      {
        RTMP_Log(RTMP_LOGDEBUG, "%s: Genuine Adobe Flash Media Server", __FUNCTION__);
      }


      if (encrypted)
      {
        char buff[RTMP_SIG_SIZE];
        /* set keys for encryption from now on */
        r->Link.rc4keyIn = keyIn;
        r->Link.rc4keyOut = keyOut;




        /* update the keystreams */
        if (r->Link.rc4keyIn)
        {
          RC4_encrypt((RC4_KEY *)r->Link.rc4keyIn, RTMP_SIG_SIZE, (uint8_t *) buff);
        }


        if (r->Link.rc4keyOut)
        {
          RC4_encrypt((RC4_KEY *)r->Link.rc4keyOut, RTMP_SIG_SIZE, (uint8_t *) buff);
        }
      }
    }
    else
    {
      //int memcmp(const void *buf1, const void *buf2, unsigned int count); returns 0 when the buffers are equal
      //check whether serversig and clientsig are identical
      //握手----------------
      r->dlg->AppendCInfo("建立连接:第1次连接。比较握手数据签名");
      //-----------------------------
      if (memcmp(serversig, clientsig, RTMP_SIG_SIZE) != 0)
      {
        //握手----------------
        r->dlg->AppendCInfo("建立连接:第1次连接。握手数据签名不匹配!");
        //-----------------------------
        RTMP_Log(RTMP_LOGWARNING, "%s: client signature does not match!",
        __FUNCTION__);
      }
    }
    //握手----------------
    r->dlg->AppendCInfo("建立连接:第1次连接。握手成功");
    //-----------------------------
    RTMP_Log(RTMP_LOGDEBUG, "%s: Handshaking finished....", __FUNCTION__);
    return TRUE;
}


This article analyzes the function calls RTMPdump makes while establishing a streaming connection.


The process of establishing a streaming connection was briefly analyzed before:


RTMP streaming media playback process
and so was its sequence of function calls:


RTMPDump source code analysis 0: analysis of the main function calls
There is no need to repeat all of that here; the work is essentially done by these two functions:


RTMP_Connect()


RTMP_ConnectStream()


The first establishes the RTMP NetConnection, the second the RTMP NetStream; normally the first is called and then the second.
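
For orientation, a typical client drives these two functions roughly as follows. This is only a hedged sketch of the usual libRTMP calling sequence (RTMP_Alloc/RTMP_Init/RTMP_SetupURL/RTMP_Read and friends are standard libRTMP calls, but check the headers of your version); it is not code from this article.

#include "rtmp.h"   /* librtmp public header (assumed to be on the include path) */

static int play(const char *url)
{
  char buf[64 * 1024];
  int nread;

  RTMP *r = RTMP_Alloc();
  if (!r)
    return -1;
  RTMP_Init(r);

  if (!RTMP_SetupURL(r, (char *)url))   /* parse the URL (RTMP_ParseURL inside) */
    goto fail;
  if (!RTMP_Connect(r, NULL))           /* socket + handshake + "connect" command */
    goto fail;
  if (!RTMP_ConnectStream(r, 0))        /* createStream/play, wait until playing */
    goto fail;

  while ((nread = RTMP_Read(r, buf, sizeof(buf))) > 0)
    ;                                   /* consume FLV data, e.g. write it to a file */

  RTMP_Close(r);
  RTMP_Free(r);
  return 0;

fail:
  RTMP_Close(r);
  RTMP_Free(r);
  return -1;
}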




Let us start with RTMP_Connect():


Note: the source pasted here is from my modified RTMPdump, to which I added progress-output code of the form
r->dlg->AppendCInfo("建立连接:第0次连接。开始建立Socket连接");
This code does not affect how the program runs and can be ignored.


RTMP_Connect()


//Connect
int
RTMP_Connect(RTMP *r, RTMPPacket *cp)
{
  //socket address structure
  struct sockaddr_in service;
  if (!r->Link.hostname.av_len)
    return FALSE;


  memset(&service, 0, sizeof(struct sockaddr_in));
  service.sin_family = AF_INET;


  if (r->Link.socksport)
  {
      //fill in the address information
      /* connect through a SOCKS proxy */
      if (!add_addr_info(&service, &r->Link.sockshost, r->Link.socksport))
        return FALSE;
  }
  else
  {
      /* connect directly */
      if (!add_addr_info(&service, &r->Link.hostname, r->Link.port))
        return FALSE;
  }
  //-----------------
  r->dlg->AppendCInfo("建立连接:第0次连接。开始建立Socket连接");
  //-----------------------------
  if (!RTMP_Connect0(r, (struct sockaddr *)&service)){
  r->dlg->AppendCInfo("建立连接:第0次连接。建立Socket连接失败");
    return FALSE;
  }
  //-----------------
  r->dlg->AppendCInfo("建立连接:第0次连接。建立Socket连接成功");
  //-----------------------------
  r->m_bSendCounter = TRUE;


  return RTMP_Connect1(r, cp);
}


We can see that it calls two functions, RTMP_Connect0() and RTMP_Connect1(). Let us look at them in order:
RTMP_Connect0()
//sockaddr is one of the address structures used in Linux socket programming; it is defined as:
//struct sockaddr{ 
//  unsigned short sa_family; 
//  char sa_data[14];
//}; 
//Notes: sa_family is the address family (also called the protocol family), usually of the form "AF_xxx"; AF_INET is by far the most common.
//  sa_data holds the 14-byte protocol address.
//Sometimes sockaddr_in is used instead of sockaddr (common on Windows); the two are layout-compatible:
//struct sockaddr_in {
//  short int sin_family;              /* Address family */
//  unsigned short int sin_port;       /* Port number */
//  struct in_addr sin_addr;           /* Internet address */
//  unsigned char sin_zero[8];         /* Same size as struct sockaddr */
//};
//On Windows, in_addr itself is defined around a union:
//struct in_addr {
//  union {
//    struct { unsigned char s_b1, s_b2, s_b3, s_b4; } S_un_b;
//    struct { unsigned short s_w1, s_w2; } S_un_w;
//    unsigned long S_addr;
//  } S_un;
//};
//Connection step 0: establish the socket connection
int
RTMP_Connect0(RTMP *r, struct sockaddr * service)
{
  int on = 1;
  r->m_sb.sb_timedout = FALSE;
  r->m_pausing = 0;
  r->m_fDuration = 0.0;
  //create a socket and store its descriptor in the corresponding field
  //-----------------
  r->dlg->AppendCInfo("建立连接:第0次连接。create一个Socket");
  //-----------------------------
  r->m_sb.sb_socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
  if (r->m_sb.sb_socket != -1)
  {


    //Prototype: int connect(int sockfd, struct sockaddr *serv_addr, int addrlen);
    //connect() connects the socket referred to by sockfd (just created above) to the network
    //address given by serv_addr; addrlen is the length of the sockaddr structure.
    //connect
    RTMP_LogPrintf("建立Socket连接!\n");
    //-----------------
    r->dlg->AppendCInfo("建立连接:第0次连接。connect该Socket");
    //-----------------------------
    if (connect(r->m_sb.sb_socket, service, sizeof(struct sockaddr)) < 0)
    {
      //-----------------
      r->dlg->AppendCInfo("建立连接:第0次连接。connect该Socket失败");
      //-----------------------------
      int err = GetSockError();
      RTMP_Log(RTMP_LOGERROR, "%s, failed to connect socket. %d (%s)",
          __FUNCTION__, err, strerror(err));
      RTMP_Close(r);
      return FALSE;
    }
    //-----------------
    r->dlg->AppendCInfo("建立连接:第0次连接。connect该Socket成功");
    //-----------------------------
    //a SOCKS port was specified (note: this is optional)
    if (r->Link.socksport)
    {
      RTMP_Log(RTMP_LOGDEBUG, "%s ... SOCKS negotiation", __FUNCTION__);
      //SOCKS negotiation: exchange the SOCKS handshake packets with the proxy
      if (!SocksNegotiate(r))
      {
        RTMP_Log(RTMP_LOGERROR, "%s, SOCKS negotiation failed.", __FUNCTION__);
        RTMP_Close(r);
        return FALSE;
      }
    }
  }
  else
  {
      RTMP_Log(RTMP_LOGERROR, "%s, failed to create socket. Error: %d", __FUNCTION__,
        GetSockError());
      return FALSE;
  }


  /* set timeout */
  //receive timeout
  {
    SET_RCVTIMEO(tv, r->Link.timeout);
    if (setsockopt
        (r->m_sb.sb_socket, SOL_SOCKET, SO_RCVTIMEO, (char *)&tv, sizeof(tv)))
    {
        RTMP_Log(RTMP_LOGERROR, "%s, Setting socket timeout to %ds failed!",
      __FUNCTION__, r->Link.timeout);
    }
  }


  setsockopt(r->m_sb.sb_socket, IPPROTO_TCP, TCP_NODELAY, (char *) &on, sizeof(on));


  return TRUE;
}


As we can see, RTMP_Connect0() only establishes the socket connection; it does not yet start the RTMP connection proper.
Now look at RTMP_Connect1(), the function that really establishes the RTMP connection:


RTMP_Connect1()
//Connection step 1: starts with the handshake
int
RTMP_Connect1(RTMP *r, RTMPPacket *cp)
{
  if (r->Link.protocol & RTMP_FEATURE_SSL)
  {
#if defined(CRYPTO) && !defined(NO_SSL)
      TLS_client(RTMP_TLS_ctx, r->m_sb.sb_ssl);
      TLS_setfd((SSL *)r->m_sb.sb_ssl, r->m_sb.sb_socket);
      if (TLS_connect((SSL *)r->m_sb.sb_ssl) < 0)
      {
        RTMP_Log(RTMP_LOGERROR, "%s, TLS_Connect failed", __FUNCTION__);
        RTMP_Close(r);
        return FALSE;
      }
#else
      RTMP_Log(RTMP_LOGERROR, "%s, no SSL/TLS support", __FUNCTION__);
      RTMP_Close(r);
      return FALSE;


#endif
  }
  //RTMP tunnelled over HTTP (RTMPT)
  if (r->Link.protocol & RTMP_FEATURE_HTTP)
  {
      r->m_msgCounter = 1;
      r->m_clientID.av_val = NULL;
      r->m_clientID.av_len = 0;
      HTTP_Post(r, RTMPT_OPEN, "", 1);
      HTTP_read(r, 1);
      r->m_msgCounter = 0;
  }
  RTMP_Log(RTMP_LOGDEBUG, "%s, ... connected, handshaking", __FUNCTION__);
  //握手----------------
  r->dlg->AppendCInfo("建立连接:第1次连接。开始握手(HandShake)");
  //-----------------------------
  RTMP_LogPrintf("开始握手(HandShake)!\n");
  if (!HandShake(r, TRUE))
  {
    //----------------
    r->dlg->AppendCInfo("建立连接:第1次连接。握手(HandShake)失败!");
    //-----------------------------
      RTMP_Log(RTMP_LOGERROR, "%s, handshake failed.", __FUNCTION__);
      RTMP_Close(r);
      return FALSE;
  }
  //----------------
  r->dlg->AppendCInfo("建立连接:第1次连接。握手(HandShake)成功");
  //-----------------------------
  RTMP_LogPrintf("握手(HandShake)完毕!\n");
  RTMP_Log(RTMP_LOGDEBUG, "%s, handshaked", __FUNCTION__);
  //send the "connect" command--------------
  //----------------
  r->dlg->AppendCInfo("建立连接:第1次连接。开始建立网络连接(NetConnection)");
  //-----------------------------
  RTMP_LogPrintf("开始建立网络连接(NetConnection)!\n");
  //----------------
  r->dlg->AppendCInfo("发送数据。消息 命令 (typeID=20) (Connect)。");
  //-----------------------------
  if (!SendConnectPacket(r, cp))
  {
    //----------------
    r->dlg->AppendCInfo("建立连接:第1次连接。建立网络连接(NetConnection)失败!");
    //-----------------------------
      RTMP_Log(RTMP_LOGERROR, "%s, RTMP connect failed.", __FUNCTION__);
      RTMP_Close(r);
      return FALSE;
  }
  //----------------
  r->dlg->AppendCInfo("建立连接:第1次连接。建立网络连接(NetConnection)成功");
  //-----------------------------
  RTMP_LogPrintf("命令消息“Connect”发送完毕!\n");
  return TRUE;
}


This function does the following:
HandShake() performs the handshake, which was analyzed earlier;
SendConnectPacket() sends the packet carrying the "connect" command, which starts building the RTMP NetConnection. How that function works internally may be analyzed another time; a sketch of the kind of AMF body it has to produce is shown below.
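
To give a feel for what such a command packet carries, the hypothetical fragment below assembles the start of a "connect" invoke body using the AMF helpers from amf.c shown earlier. The exact set of properties (app, tcUrl, flashVer, ...) and their values vary by server and libRTMP version, so treat this as an illustration of the AMF layout, not as the actual SendConnectPacket() implementation.

#include "amf.h"

/* Sketch: write the beginning of an AMF0 "connect" invoke into body[].
 * Returns the number of bytes written, or -1 if the buffer is too small. */
static int build_connect_body(char *body, char *end)
{
  char *p = body;
  AVal av_connect = AVC("connect");
  AVal av_app     = AVC("app");
  AVal av_tcUrl   = AVC("tcUrl");
  AVal app        = AVC("vod");                          /* example values */
  AVal tcUrl      = AVC("rtmp://localhost:1935/vod");

  p = AMF_EncodeString(p, end, &av_connect);   /* command name           */
  if (!p) return -1;
  p = AMF_EncodeNumber(p, end, 1.0);           /* transaction ID         */
  if (!p) return -1;

  if (p + 1 > end) return -1;
  *p++ = AMF_OBJECT;                           /* command object begins  */
  p = AMF_EncodeNamedString(p, end, &av_app,   &app);
  if (!p) return -1;
  p = AMF_EncodeNamedString(p, end, &av_tcUrl, &tcUrl);
  if (!p) return -1;
  /* ... further properties (flashVer, swfUrl, fpad, audioCodecs, ...) ... */
  p = AMF_EncodeInt24(p, end, AMF_OBJECT_END); /* 00 00 09 terminator    */
  return p ? (int)(p - body) : -1;
}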


That concludes the analysis of RTMP_Connect().


The previous article analyzed how RTMPdump establishes a NetConnection: RTMPdump source code analysis 5: establishing a streaming connection (NetConnection part)


Without further ado, let us look at RTMP_ConnectStream(), which builds a NetStream on top of that NetConnection.


RTMP_ConnectStream()
//Create the stream
int
RTMP_ConnectStream(RTMP *r, int seekTime)
{
  RTMPPacket packet = { 0 };


  /* seekTime was already set by SetupStream / SetupURL.
   * This is only needed by ReconnectStream.
   */
  if (seekTime > 0)
    r->Link.seekTime = seekTime;


  r->m_mediaChannel = 0;


  while (!r->m_bPlaying && RTMP_IsConnected(r) && RTMP_ReadPacket(r, &packet))
  {
      if (RTMPPacket_IsReady(&packet))
      {
        if (!packet.m_nBodySize)
          continue;
        if ((packet.m_packetType == RTMP_PACKET_TYPE_AUDIO) ||
          (packet.m_packetType == RTMP_PACKET_TYPE_VIDEO) ||
          (packet.m_packetType == RTMP_PACKET_TYPE_INFO))
        {
          RTMP_Log(RTMP_LOGWARNING, "Received FLV packet before play()! Ignoring.");
          RTMPPacket_Free(&packet);
          continue;
        }
      //process the packet!
      //----------------
      r->dlg->AppendCInfo("建立网络流:处理收到的数据。开始处理收到的数据");
      //-----------------------------
      RTMP_ClientPacket(r, &packet);
      //----------------
      r->dlg->AppendCInfo("建立网络流:处理收到的数据。处理完毕,清除数据。");
      //-----------------------------
      RTMPPacket_Free(&packet);
    }
  }


  return r->m_bPlaying;
}




At first glance this function seems to contain very little code, but that is deceptive; it is fairly complex, and in my view noticeably more so than RTMP_Connect().
The key is the while() loop.
The loop keeps running as long as its three conditions hold, and it is only left when an error occurs or the steps that establish the NetStream have completed.


Two functions are particularly important here:


RTMP_ReadPacket()


RTMP_ClientPacket()


The first reads a message packet received over the socket but does not process it in any way.
The second processes the message and makes the appropriate response.
Together they implement the receive-a-message-then-respond cycle.


Let us look at RTMP_ReadPacket() first:
//Read a received chunk
int
RTMP_ReadPacket(RTMP *r, RTMPPacket *packet)
{
  //packet receives the parsed result
  //the chunk header is at most 18 bytes
  uint8_t hbuf[RTMP_MAX_HEADER_SIZE] = { 0 };
  //header points at the data received from the socket
  char *header = (char *)hbuf;
  int nSize, hSize, nToRead, nChunk;
  int didAlloc = FALSE;


  RTMP_Log(RTMP_LOGDEBUG2, "%s: fd=%d", __FUNCTION__, r->m_sb.sb_socket);
  //store the received byte in hbuf
  if (ReadN(r, (char *)hbuf, 1) == 0)
  {
      RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet header", __FUNCTION__);
      return FALSE;
  }
  //chunk type (fmt)
  packet->m_headerType = (hbuf[0] & 0xc0) >> 6;
  //chunk stream ID (2-63)
  packet->m_nChannel = (hbuf[0] & 0x3f);
  header++;
  //when the first chunk-stream-ID byte is 0, the chunk stream ID occupies 2 bytes
  if (packet->m_nChannel == 0)
  {
    if (ReadN(r, (char *)&hbuf[1], 1) != 1)
    {
      RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet header 2nd byte",
        __FUNCTION__);
      return FALSE;
    }
    //compute the chunk stream ID (64-319)
      packet->m_nChannel = hbuf[1];
      packet->m_nChannel += 64;
      header++;
  }
  //when the first chunk-stream-ID byte is 1, the chunk stream ID occupies 3 bytes
  else if (packet->m_nChannel == 1)
  {
    int tmp;
    if (ReadN(r, (char *)&hbuf[1], 2) != 2)
    {
      RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet header 3nd byte",
        __FUNCTION__);
      return FALSE;
    }
    tmp = (hbuf[2] << 8) + hbuf[1];
    //compute the chunk stream ID (64-65599)
    packet->m_nChannel = tmp + 64;
    RTMP_Log(RTMP_LOGDEBUG, "%s, m_nChannel: %0x", __FUNCTION__, packet->m_nChannel);
    header += 2;
  }
  //size of the chunk message header (one of 4 possible values)
  nSize = packetSize[packet->m_headerType];


  if (nSize == RTMP_LARGE_HEADER_SIZE)  /* if we get a full header the timestamp is absolute */
    packet->m_hasAbsTimestamp = TRUE; //the timestamp in a full 11-byte chunk message header is absolute
  else if (nSize < RTMP_LARGE_HEADER_SIZE)
  {       
    /* using values from the last message of this channel */
    if (r->m_vecChannelsIn[packet->m_nChannel])
      memcpy(packet, r->m_vecChannelsIn[packet->m_nChannel],sizeof(RTMPPacket));
  }


  nSize--;


  if (nSize > 0 && ReadN(r, header, nSize) != nSize)
  {
    RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet header. type: %x",
      __FUNCTION__, (unsigned int)hbuf[0]);
      return FALSE;
  }


  hSize = nSize + (header - (char *)hbuf);


  if (nSize >= 3)
  {
    //timestamp (note: big-endian, converted to host order) (present in 11-, 7- and 3-byte headers)
    packet->m_nTimeStamp = AMF_DecodeInt24(header);


    /*RTMP_Log(RTMP_LOGDEBUG, "%s, reading RTMP packet chunk on channel %x, headersz %i, timestamp %i, abs timestamp %i", __FUNCTION__, packet.m_nChannel, nSize, packet.m_nTimeStamp, packet.m_hasAbsTimestamp); */
    //消息长度(11,7字节首部都有)
    if (nSize >= 6)
    {
      packet->m_nBodySize = AMF_DecodeInt24(header + 3);
      packet->m_nBytesRead = 0;
      RTMPPacket_Free(packet);
      //(11,7字节首部都有)
      if (nSize > 6)
      {
        //Msg type ID
        packet->m_packetType = header[6];
        //Msg Stream ID
        if (nSize == 11)
          packet->m_nInfoField2 = DecodeInt32LE(header + 7);
      }
    }
    //Extend TimeStamp
    if (packet->m_nTimeStamp == 0xffffff)
    {
      if (ReadN(r, header + nSize, 4) != 4)
      {
        RTMP_Log(RTMP_LOGERROR, "%s, failed to read extended timestamp",
              __FUNCTION__);
        return FALSE;
      }
      packet->m_nTimeStamp = AMF_DecodeInt32(header + nSize);
      hSize += 4;
    }
  }


  RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)hbuf, hSize);


  if (packet->m_nBodySize > 0 && packet->m_body == NULL)
  {
    if (!RTMPPacket_Alloc(packet, packet->m_nBodySize))
    {
      RTMP_Log(RTMP_LOGDEBUG, "%s, failed to allocate packet", __FUNCTION__);
      return FALSE;
    }
    didAlloc = TRUE;
    packet->m_headerType = (hbuf[0] & 0xc0) >> 6;
  }


  nToRead = packet->m_nBodySize - packet->m_nBytesRead;
  nChunk = r->m_inChunkSize;
  if (nToRead < nChunk)
    nChunk = nToRead;


  /* Does the caller want the raw chunk? */
  if (packet->m_chunk)
  {
      packet->m_chunk->c_headerSize = hSize;
      memcpy(packet->m_chunk->c_header, hbuf, hSize);
      packet->m_chunk->c_chunk = packet->m_body + packet->m_nBytesRead;
      packet->m_chunk->c_chunkSize = nChunk;
  }


  if (ReadN(r, packet->m_body + packet->m_nBytesRead, nChunk) != nChunk)
  {
      RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet body. len: %lu",
    __FUNCTION__, packet->m_nBodySize);
      return FALSE;
  }


  RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)packet->m_body + packet->m_nBytesRead, nChunk);


  packet->m_nBytesRead += nChunk;


  /* keep the packet as ref for other packets on this channel */
  if (!r->m_vecChannelsIn[packet->m_nChannel])
    r->m_vecChannelsIn[packet->m_nChannel] = (RTMPPacket *) malloc(sizeof(RTMPPacket));
  memcpy(r->m_vecChannelsIn[packet->m_nChannel], packet, sizeof(RTMPPacket));
  //读取完毕
  if (RTMPPacket_IsReady(packet))
  {
      /* make packet's timestamp absolute */
      if (!packet->m_hasAbsTimestamp)
        packet->m_nTimeStamp += r->m_channelTimestamp[packet->m_nChannel];  /* timestamps seem to be always relative!! */


      r->m_channelTimestamp[packet->m_nChannel] = packet->m_nTimeStamp;


      /* reset the data from the stored packet. we keep the header since we may use it later if a new packet for this channel */
      /* arrives and requests to re-use some info (small packet header) */
      r->m_vecChannelsIn[packet->m_nChannel]->m_body = NULL;
      r->m_vecChannelsIn[packet->m_nChannel]->m_nBytesRead = 0;
      r->m_vecChannelsIn[packet->m_nChannel]->m_hasAbsTimestamp = FALSE;  /* can only be false if we reuse header */
  }
  else
  {
      packet->m_body = NULL;  /* so it won't be erased on free */
  }


  return TRUE;
}


在这里要注意的是,接收下来的实际上是块(Chunk)而不是消息(Message),
因为消息(Message)在网络上传输的时候,实际上要被分割成块(Chunk),这里解析的就是块(Chunk)。
举个例子(数字仅为假设):如果对端的块大小(m_inChunkSize)是128字节,而某条消息的长度(m_nBodySize)是300字节,那么这条消息会被拆成128+128+44三个块;RTMP_ReadPacket()每次只读一个块,要调用三次、m_nBytesRead累计到等于m_nBodySize之后,RTMPPacket_IsReady()才为真,上层才能拿到完整的消息。


可参考:RTMP规范简单分析


具体的解析代码我就不多说了,直接对照RTMP协议规范,一个字节一个字节地解析就OK了。
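为了便于对照,这里举一个具体的字节例子(数据纯属假设)。假设从Socket上收到的前12个字节是:

  03 | 00 00 00 | 00 01 2C | 14 | 00 00 00 00

按照上面的代码解析:第1个字节0x03的高2位是fmt=0(即m_headerType=0,对应12字节的完整块首部),低6位是块流ID=3(m_nChannel=3);随后3个字节00 00 00是TimeStamp=0;再3个字节00 01 2C是消息长度300(m_nBodySize);接着1个字节0x14是消息类型ID=20,即AMF0编码的命令消息;最后4个字节是小端存储的消息流ID=0(m_nInfoField2)。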




书接上回:RTMPdump 源代码分析 6: 建立一个流媒体连接 (NetStream部分 1)


上回说到,有两个函数尤为重要:


RTMP_ReadPacket()


RTMP_ClientPacket()


而且分析了第一个函数。现在我们再来看看第二个函数吧。第二个函数的主要作用是:处理消息(Message),并做出响应。
先把带注释的代码贴上:


//处理接收到的Chunk
int
RTMP_ClientPacket(RTMP *r, RTMPPacket *packet)
{
  int bHasMediaPacket = 0;
  switch (packet->m_packetType)
  {
    //RTMP消息类型ID=1,设置块大小
    case 0x01:
      /* chunk size */
      //----------------
      r->dlg->AppendCInfo("处理收到的数据。消息 Set Chunk Size (typeID=1)。");
      //-----------------------------
      RTMP_LogPrintf("处理消息 Set Chunk Size (typeID=1)\n");
      HandleChangeChunkSize(r, packet);
      break;
    //RTMP消息类型ID=3,致谢
    case 0x03:
      /* bytes read report */
      RTMP_Log(RTMP_LOGDEBUG, "%s, received: bytes read report", __FUNCTION__);
      break;
    //RTMP消息类型ID=4,用户控制
    case 0x04:
      /* ctrl */
      //----------------
      r->dlg->AppendCInfo("处理收到的数据。消息 User Control (typeID=4)。");
      //-----------------------------
      RTMP_LogPrintf("处理消息 User Control (typeID=4)\n");
      HandleCtrl(r, packet);
      break;
    //RTMP消息类型ID=5
    case 0x05:
      /* server bw */
      //----------------
      r->dlg->AppendCInfo("处理收到的数据。消息 Window Acknowledgement Size (typeID=5)。");
      //-----------------------------
      RTMP_LogPrintf("处理消息 Window Acknowledgement Size (typeID=5)\n");
      HandleServerBW(r, packet);
      break;
    //RTMP消息类型ID=6
    case 0x06:
      /* client bw */
      //----------------
      r->dlg->AppendCInfo("处理收到的数据。消息 Set Peer Bandwidth (typeID=6)。");
      //-----------------------------
      RTMP_LogPrintf("处理消息 Set Peer Bandwidth (typeID=6)\n");
      HandleClientBW(r, packet);
      break;
    //RTMP消息类型ID=8,音频数据
    case 0x08:
      /* audio data */
      /*RTMP_Log(RTMP_LOGDEBUG, "%s, received: audio %lu bytes", __FUNCTION__, packet.m_nBodySize); */
      HandleAudio(r, packet);
      bHasMediaPacket = 1;
      if (!r->m_mediaChannel)
        r->m_mediaChannel = packet->m_nChannel;
      if (!r->m_pausing)
        r->m_mediaStamp = packet->m_nTimeStamp;
      break;
    //RTMP消息类型ID=9,视频数据
    case 0x09:
      /* video data */
      /*RTMP_Log(RTMP_LOGDEBUG, "%s, received: video %lu bytes", __FUNCTION__, packet.m_nBodySize); */
      HandleVideo(r, packet);
      bHasMediaPacket = 1;
      if (!r->m_mediaChannel)
        r->m_mediaChannel = packet->m_nChannel;
      if (!r->m_pausing)
        r->m_mediaStamp = packet->m_nTimeStamp;
      break;
    //RTMP消息类型ID=15,AMF3编码,忽略
    case 0x0F:      /* flex stream send */
      RTMP_Log(RTMP_LOGDEBUG,
        "%s, flex stream send, size %lu bytes, not supported, ignoring",
        __FUNCTION__, packet->m_nBodySize);
      break;
    //RTMP消息类型ID=16,AMF3编码,忽略
    case 0x10:      /* flex shared object */
      RTMP_Log(RTMP_LOGDEBUG,
        "%s, flex shared object, size %lu bytes, not supported, ignoring",
        __FUNCTION__, packet->m_nBodySize);
      break;
    //RTMP消息类型ID=17,AMF3编码,忽略
    case 0x11:      /* flex message */
    {
      RTMP_Log(RTMP_LOGDEBUG,
        "%s, flex message, size %lu bytes, not fully supported",
        __FUNCTION__, packet->m_nBodySize);
      /*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */


      /* some DEBUG code */
#if 0
     RTMP_LIB_AMFObject obj;
     int nRes = obj.Decode(packet.m_body+1, packet.m_nBodySize-1);
     if(nRes < 0) {
     RTMP_Log(RTMP_LOGERROR, "%s, error decoding AMF3 packet", __FUNCTION__);
     /*return; */
     }


     obj.Dump();
#endif


      if (HandleInvoke(r, packet->m_body + 1, packet->m_nBodySize - 1) == 1)
        bHasMediaPacket = 2;
      break;
    }
    //RTMP消息类型ID=18,AMF0编码,数据消息
    case 0x12:
      /* metadata (notify) */


      RTMP_Log(RTMP_LOGDEBUG, "%s, received: notify %lu bytes", __FUNCTION__,
      packet->m_nBodySize);
      //处理元数据,暂时注释
      /*
      if (HandleMetadata(r, packet->m_body, packet->m_nBodySize))
        bHasMediaPacket = 1;
      break;
      */
    //RTMP消息类型ID=19,AMF0编码,忽略
    case 0x13:
      RTMP_Log(RTMP_LOGDEBUG, "%s, shared object, not supported, ignoring",
        __FUNCTION__);
      break;
    //RTMP消息类型ID=20,AMF0编码,命令消息
    //处理命令消息!
    case 0x14:
      //----------------
      r->dlg->AppendCInfo("处理收到的数据。消息 命令 (AMF0编码) (typeID=20)。");
      //-----------------------------
      /* invoke */
      RTMP_Log(RTMP_LOGDEBUG, "%s, received: invoke %lu bytes", __FUNCTION__,
      packet->m_nBodySize);
      RTMP_LogPrintf("处理命令消息 (typeID=20,AMF0编码)\n");
      /*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */


      if (HandleInvoke(r, packet->m_body, packet->m_nBodySize) == 1)
        bHasMediaPacket = 2;
      break;
    //RTMP消息类型ID=22
    case 0x16:
    {
      /* go through FLV packets and handle metadata packets */
      unsigned int pos = 0;
      uint32_t nTimeStamp = packet->m_nTimeStamp;


      while (pos + 11 < packet->m_nBodySize)
      {
        uint32_t dataSize = AMF_DecodeInt24(packet->m_body + pos + 1);  /* size without header (11) and prevTagSize (4) */


        if (pos + 11 + dataSize + 4 > packet->m_nBodySize)
        {
          RTMP_Log(RTMP_LOGWARNING, "Stream corrupt?!");
          break;
        }
        if (packet->m_body[pos] == 0x12)
        {
          HandleMetadata(r, packet->m_body + pos + 11, dataSize);
        }
        else if (packet->m_body[pos] == 8 || packet->m_body[pos] == 9)
        {
          nTimeStamp = AMF_DecodeInt24(packet->m_body + pos + 4);
          nTimeStamp |= (packet->m_body[pos + 7] << 24);
        }
        pos += (11 + dataSize + 4);
      }
      if (!r->m_pausing)
      r->m_mediaStamp = nTimeStamp;


      /* FLV tag(s) */
      /*RTMP_Log(RTMP_LOGDEBUG, "%s, received: FLV tag(s) %lu bytes", __FUNCTION__, packet.m_nBodySize); */
      bHasMediaPacket = 1;
      break;
    }
    default:
      RTMP_Log(RTMP_LOGDEBUG, "%s, unknown packet type received: 0x%02x", __FUNCTION__,
      packet->m_packetType);
#ifdef _DEBUG
      RTMP_LogHex(RTMP_LOGDEBUG, (const uint8_t *)packet->m_body, packet->m_nBodySize);
#endif
    }


  return bHasMediaPacket;
}


里面注释的比较多,可以看出,大体的思路是,根据接收到的消息(Message)类型的不同,做出不同的响应。
例如收到的消息类型为0x01,那么就是设置块(Chunk)大小的协议,那么就调用相应的函数进行处理。
因此,本函数可以说是程序的灵魂,收到的各种命令消息都要经过本函数的判断决定调用哪个函数进行相应的处理。


在这里注意一下消息类型为0x14的消息,即消息类型ID为20的消息,是AMF0编码的命令消息。
这在RTMP连接中是非常常见的,比如说各种控制命令:播放,暂停,停止等等。我们来仔细看看它的调用。


可以发现它调用了HandleInvoke()函数来处理服务器发来的AMF0编码的命令,来看看细节:


/* Returns 0 for OK/Failed/error, 1 for 'Stop or Complete' */
static int
HandleInvoke(RTMP *r, const char *body, unsigned int nBodySize)
{
  AMFObject obj;
  AVal method;
  int txn;
  int ret = 0, nRes;
  if (body[0] != 0x02)    /* make sure it is a string method name we start with */
  {
      RTMP_Log(RTMP_LOGWARNING, "%s, Sanity failed. no string method in invoke packet",
        __FUNCTION__);
      return 0;
  }


  nRes = AMF_Decode(&obj, body, nBodySize, FALSE);
  if (nRes < 0)
  {
      RTMP_Log(RTMP_LOGERROR, "%s, error decoding invoke packet", __FUNCTION__);
      return 0;
  }


  AMF_Dump(&obj);
  AMFProp_GetString(AMF_GetProp(&obj, NULL, 0), &method);
  txn = (int)AMFProp_GetNumber(AMF_GetProp(&obj, NULL, 1));
  RTMP_Log(RTMP_LOGDEBUG, "%s, server invoking <%s>", __FUNCTION__, method.av_val);


  if (AVMATCH(&method, &av__result))
  {
    AVal methodInvoked = {0};
    int i;


    for (i=0; i<r->m_numCalls; i++) {
      if (r->m_methodCalls[i].num == txn) {
        methodInvoked = r->m_methodCalls[i].name;
        AV_erase(r->m_methodCalls, &r->m_numCalls, i, FALSE);
        break;
      }
    }
    if (!methodInvoked.av_val) {
        RTMP_Log(RTMP_LOGDEBUG, "%s, received result id %d without matching request",
       __FUNCTION__, txn);
       goto leave;
    }
    //----------------
    char temp_str[100];
    sprintf(temp_str,"接收数据。消息 %s 的 Result",methodInvoked.av_val);
    r->dlg->AppendCInfo(temp_str);
    //-----------------------------
    RTMP_Log(RTMP_LOGDEBUG, "%s, received result for method call <%s>", __FUNCTION__,
    methodInvoked.av_val);


    if (AVMATCH(&methodInvoked, &av_connect))
    {
      //----------------
      r->dlg->AppendMLInfo(20,0,"命令消息","Result (Connect)");
      //-----------------------------
      if (r->Link.token.av_len)
      {
        AMFObjectProperty p;
        if (RTMP_FindFirstMatchingProperty(&obj, &av_secureToken, &p))
        {
          DecodeTEA(&r->Link.token, &p.p_vu.p_aval);
          SendSecureTokenResponse(r, &p.p_vu.p_aval);
        }
      }
      if (r->Link.protocol & RTMP_FEATURE_WRITE)
      {
        SendReleaseStream(r);
        SendFCPublish(r);
      }
      else
      {
        //----------------
        r->dlg->AppendCInfo("发送数据。消息 Window Acknowledgement Size (typeID=5)。");
        //-----------------------------
        RTMP_LogPrintf("发送消息Window Acknowledgement Size(typeID=5)\n");
        RTMP_SendServerBW(r);
        RTMP_SendCtrl(r, 3, 0, 300);
      }
      //----------------
      r->dlg->AppendCInfo("发送数据。消息 命令 (typeID=20) (CreateStream)。");
      //-----------------------------
      RTMP_LogPrintf("发送命令消息“CreateStream” (typeID=20)\n");
      RTMP_SendCreateStream(r);


      if (!(r->Link.protocol & RTMP_FEATURE_WRITE))
      {
        /* Send the FCSubscribe if live stream or if subscribepath is set */
        if (r->Link.subscribepath.av_len)
          SendFCSubscribe(r, &r->Link.subscribepath);
        else if (r->Link.lFlags & RTMP_LF_LIVE)
          SendFCSubscribe(r, &r->Link.playpath);
      }
  }
  else if (AVMATCH(&methodInvoked, &av_createStream))
  {
    //----------------
    r->dlg->AppendMLInfo(20,0,"命令消息","Result (CreateStream)");
    //-----------------------------
    r->m_stream_id = (int)AMFProp_GetNumber(AMF_GetProp(&obj, NULL, 3));


    if (r->Link.protocol & RTMP_FEATURE_WRITE)
    {
        SendPublish(r);
    }
    else
    {
        if (r->Link.lFlags & RTMP_LF_PLST)
          SendPlaylist(r);
      //----------------
      r->dlg->AppendCInfo("发送数据。消息 命令 (typeID=20) (Play)。");
      //-----------------------------
      RTMP_LogPrintf("发送命令消息“play” (typeID=20)\n");
        SendPlay(r);
        RTMP_SendCtrl(r, 3, r->m_stream_id, r->m_nBufferMS);
    }
  }
  else if (AVMATCH(&methodInvoked, &av_play) ||
        AVMATCH(&methodInvoked, &av_publish))
  {
    //----------------
    r->dlg->AppendMLInfo(20,0,"命令消息","Result (Play or Publish)");
    //-----------------------------
    r->m_bPlaying = TRUE;
  }
  free(methodInvoked.av_val);
  }
  else if (AVMATCH(&method, &av_onBWDone))
  {
    //----------------
    r->dlg->AppendMLInfo(20,0,"命令消息","onBWDone");
    //-----------------------------
    if (!r->m_nBWCheckCounter)
        SendCheckBW(r);
  }
  else if (AVMATCH(&method, &av_onFCSubscribe))
  {
      /* SendOnFCSubscribe(); */
  }
  else if (AVMATCH(&method, &av_onFCUnsubscribe))
  {
    //----------------
    r->dlg->AppendMLInfo(20,0,"命令消息","onFCUnsubscribe");
    //-----------------------------
      RTMP_Close(r);
      ret = 1;
  }
  else if (AVMATCH(&method, &av_ping))
  {
    //----------------
    r->dlg->AppendMLInfo(20,0,"命令消息","Ping");
    //-----------------------------
      SendPong(r, txn);
  }
  else if (AVMATCH(&method, &av__onbwcheck))
  {
    //----------------
    r->dlg->AppendMLInfo(20,0,"命令消息","onBWcheck");
    //-----------------------------
      SendCheckBWResult(r, txn);
  }
  else if (AVMATCH(&method, &av__onbwdone))
  {
    //----------------
    r->dlg->AppendMLInfo(20,0,"命令消息","onBWdone");
    //-----------------------------
      int i;
      for (i = 0; i < r->m_numCalls; i++)
    if (AVMATCH(&r->m_methodCalls[i].name, &av__checkbw))
    {
      AV_erase(r->m_methodCalls, &r->m_numCalls, i, TRUE);
      break;
    }
  }
  else if (AVMATCH(&method, &av__error))
  {
    //----------------
    r->dlg->AppendMLInfo(20,0,"命令消息","error");
    //-----------------------------
      RTMP_Log(RTMP_LOGERROR, "rtmp server sent error");
  }
  else if (AVMATCH(&method, &av_close))
  {
    //----------------
    r->dlg->AppendMLInfo(20,0,"命令消息","close");
    //-----------------------------
      RTMP_Log(RTMP_LOGERROR, "rtmp server requested close");
      RTMP_Close(r);
  }
  else if (AVMATCH(&method, &av_onStatus))
  {
    //----------------
    r->dlg->AppendMLInfo(20,0,"命令消息","onStatus");
    //-----------------------------
      AMFObject obj2;
      AVal code, level;
      AMFProp_GetObject(AMF_GetProp(&obj, NULL, 3), &obj2);
      AMFProp_GetString(AMF_GetProp(&obj2, &av_code, -1), &code);
      AMFProp_GetString(AMF_GetProp(&obj2, &av_level, -1), &level);


      RTMP_Log(RTMP_LOGDEBUG, "%s, onStatus: %s", __FUNCTION__, code.av_val);
      if (AVMATCH(&code, &av_NetStream_Failed)
        || AVMATCH(&code, &av_NetStream_Play_Failed)
        || AVMATCH(&code, &av_NetStream_Play_StreamNotFound)
        || AVMATCH(&code, &av_NetConnection_Connect_InvalidApp))
      {
        r->m_stream_id = -1;
        RTMP_Close(r);
        RTMP_Log(RTMP_LOGERROR, "Closing connection: %s", code.av_val);
      }
  else if (AVMATCH(&code, &av_NetStream_Play_Start))
  {
    int i;
    r->m_bPlaying = TRUE;
    for (i = 0; i < r->m_numCalls; i++)
      {
        if (AVMATCH(&r->m_methodCalls[i].name, &av_play))
    {
      AV_erase(r->m_methodCalls, &r->m_numCalls, i, TRUE);
      break;
    }
      }
  }


      else if (AVMATCH(&code, &av_NetStream_Publish_Start))
  {
    int i;
    r->m_bPlaying = TRUE;
    for (i = 0; i < r->m_numCalls; i++)
      {
        if (AVMATCH(&r->m_methodCalls[i].name, &av_publish))
    {
      AV_erase(r->m_methodCalls, &r->m_numCalls, i, TRUE);
      break;
    }
      }
  }


      /* Return 1 if this is a Play.Complete or Play.Stop */
      else if (AVMATCH(&code, &av_NetStream_Play_Complete)
    || AVMATCH(&code, &av_NetStream_Play_Stop)
    || AVMATCH(&code, &av_NetStream_Play_UnpublishNotify))
  {
    RTMP_Close(r);
    ret = 1;
  }


      else if (AVMATCH(&code, &av_NetStream_Seek_Notify))
        {
    r->m_read.flags &= ~RTMP_READ_SEEKING;
  }


      else if (AVMATCH(&code, &av_NetStream_Pause_Notify))
        {
    if (r->m_pausing == 1 || r->m_pausing == 2)
    {
      RTMP_SendPause(r, FALSE, r->m_pauseStamp);
      r->m_pausing = 3;
    }
  }
    }
  else if (AVMATCH(&method, &av_playlist_ready))
    {
    //----------------
    r->dlg->AppendMLInfo(20,0,"命令消息","playlist_ready");
    //-----------------------------
      int i;
      for (i = 0; i < r->m_numCalls; i++)
        {
          if (AVMATCH(&r->m_methodCalls[i].name, &av_set_playlist))
      {
        AV_erase(r->m_methodCalls, &r->m_numCalls, i, TRUE);
        break;
      }
        }
    }
  else
    {


    }
leave:
  AMF_Reset(&obj);
  return ret;
}


int
RTMP_FindFirstMatchingProperty(AMFObject *obj, const AVal *name,
             AMFObjectProperty * p)
{
  int n;
  /* this is a small object search to locate the "duration" property */
  for (n = 0; n < obj->o_num; n++)
    {
      AMFObjectProperty *prop = AMF_GetProp(obj, NULL, n);


      if (AVMATCH(&prop->p_name, name))
  {
    *p = *prop;
    return TRUE;
  }


      if (prop->p_type == AMF_OBJECT)
  {
    if (RTMP_FindFirstMatchingProperty(&prop->p_vu.p_object, name, p))
      return TRUE;
  }
    }
  return FALSE;
}


该函数主要做了以下几步:


1.调用AMF_Decode()解码AMF命令数据


2.调用AMFProp_GetString()获取具体命令的字符串


3.调用AVMATCH()比较字符串,不同的命令做不同的处理,例如以下几个:


AVMATCH(&methodInvoked, &av_connect)
AVMATCH(&methodInvoked, &av_createStream)
AVMATCH(&methodInvoked, &av_play)
AVMATCH(&methodInvoked, &av_publish)
AVMATCH(&method, &av_onBWDone)


等等,不一一例举了


具体的处理过程如下所示。在这里说一个“建立网络流”(createStream)的例子,通常发生在建立网络连接(NetConnection)之后,播放(Play)之前。


else if (AVMATCH(&methodInvoked, &av_createStream))
{
    //----------------
    r->dlg->AppendMLInfo(20,0,"命令消息","Result (CreateStream)");
    //-----------------------------
    r->m_stream_id = (int)AMFProp_GetNumber(AMF_GetProp(&obj, NULL, 3));


    if (r->Link.protocol & RTMP_FEATURE_WRITE)
    {
        SendPublish(r);
    }
    else
    {
      if (r->Link.lFlags & RTMP_LF_PLST)
          SendPlaylist(r);
      //----------------
      r->dlg->AppendCInfo("发送数据。消息 命令 (typeID=20) (Play)。");
      //-----------------------------
      RTMP_LogPrintf("发送命令消息“play” (typeID=20)\n");
        SendPlay(r);
        RTMP_SendCtrl(r, 3, r->m_stream_id, r->m_nBufferMS);
    }
}


由代码可见,程序先从Result中取出了stream_id,
然后(在设置了播放列表标志RTMP_LF_PLST时)调用SendPlaylist()请求播放列表,接着调用SendPlay()发送“play”命令开始播放流媒体数据,最后还用RTMP_SendCtrl()设置了缓冲时间。SendPlay()的大致样子见下面的示意。
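SendPlay()的写法和后文的SendPublish()非常类似,下面给出一个简化的示意(省略了start、length等可选参数的编码,具体实现请以librtmp的rtmp.c为准):

//发送“play”命令(简化示意,省略可选参数)
static int
SendPlay(RTMP *r)
{
  RTMPPacket packet;
  char pbuf[1024], *pend = pbuf + sizeof(pbuf);
  char *enc;

  packet.m_nChannel = 0x08; /* source channel */
  packet.m_headerType = RTMP_PACKET_SIZE_LARGE;
  packet.m_packetType = 0x14; /* 命令消息 */
  packet.m_nTimeStamp = 0;
  packet.m_nInfoField2 = r->m_stream_id; /* 使用createStream返回的流ID */
  packet.m_hasAbsTimestamp = 0;
  packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE;

  enc = packet.m_body;
  enc = AMF_EncodeString(enc, pend, &av_play);           /* "play" */
  enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes);  /* 事务ID */
  *enc++ = AMF_NULL;                                      /* 命令对象为空 */
  enc = AMF_EncodeString(enc, pend, &r->Link.playpath);   /* 播放路径 */
  if (!enc)
    return FALSE;

  packet.m_nBodySize = enc - packet.m_body;
  return RTMP_SendPacket(r, &packet, TRUE);
}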




之前写了一系列的文章介绍RTMPDump各种函数。比如怎么建立网络连接(NetConnection),怎么建立网络流(NetStream)之类的,
唯独没有介绍这些发送或接收的数据,在底层到底是怎么实现的。
本文就是要剖析一下其内部的实现。即这些消息(Message)到底是怎么发送和接收的。


先来看看发送消息吧。


发送connect命令使用函数SendConnectPacket()


发送createstream命令使用RTMP_SendCreateStream()


发送realeaseStream命令使用SendReleaseStream()
发送publish命令使用SendPublish()
发送deleteStream的命令使用SendDeleteStream()
发送pause命令使用RTMP_SendPause()
不再一一例举,发现函数命名有两种规律:RTMP_Send***()或者Send***(),其中*号代表命令的名称。


SendConnectPacket()这个命令是每次程序开始运行的时候发送的第一个命令消息,内容比较多,包含了很多AMF编码的内容,在此不多做分析,
贴上代码:
//发送“connect”命令
static int
SendConnectPacket(RTMP *r, RTMPPacket *cp)
{
  RTMPPacket packet;
  char pbuf[4096], *pend = pbuf + sizeof(pbuf);
  char *enc;


  if (cp)
    return RTMP_SendPacket(r, cp, TRUE);


  packet.m_nChannel = 0x03; /* control channel (invoke) */
  packet.m_headerType = RTMP_PACKET_SIZE_LARGE;
  packet.m_packetType = 0x14; /* INVOKE */
  packet.m_nTimeStamp = 0;
  packet.m_nInfoField2 = 0;
  packet.m_hasAbsTimestamp = 0;
  packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE;


  enc = packet.m_body;
  enc = AMF_EncodeString(enc, pend, &av_connect);
  enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes);
  *enc++ = AMF_OBJECT;


  enc = AMF_EncodeNamedString(enc, pend, &av_app, &r->Link.app);
  if (!enc)
    return FALSE;
  if (r->Link.protocol & RTMP_FEATURE_WRITE)
    {
      enc = AMF_EncodeNamedString(enc, pend, &av_type, &av_nonprivate);
      if (!enc)
  return FALSE;
    }
  if (r->Link.flashVer.av_len)
    {
      enc = AMF_EncodeNamedString(enc, pend, &av_flashVer, &r->Link.flashVer);
      if (!enc)
  return FALSE;
    }
  if (r->Link.swfUrl.av_len)
    {
      enc = AMF_EncodeNamedString(enc, pend, &av_swfUrl, &r->Link.swfUrl);
      if (!enc)
  return FALSE;
    }
  if (r->Link.tcUrl.av_len)
    {
      enc = AMF_EncodeNamedString(enc, pend, &av_tcUrl, &r->Link.tcUrl);
      if (!enc)
  return FALSE;
    }
  if (!(r->Link.protocol & RTMP_FEATURE_WRITE))
    {
      enc = AMF_EncodeNamedBoolean(enc, pend, &av_fpad, FALSE);
      if (!enc)
  return FALSE;
      enc = AMF_EncodeNamedNumber(enc, pend, &av_capabilities, 15.0);
      if (!enc)
  return FALSE;
      enc = AMF_EncodeNamedNumber(enc, pend, &av_audioCodecs, r->m_fAudioCodecs);
      if (!enc)
  return FALSE;
      enc = AMF_EncodeNamedNumber(enc, pend, &av_videoCodecs, r->m_fVideoCodecs);
      if (!enc)
  return FALSE;
      enc = AMF_EncodeNamedNumber(enc, pend, &av_videoFunction, 1.0);
      if (!enc)
  return FALSE;
      if (r->Link.pageUrl.av_len)
  {
    enc = AMF_EncodeNamedString(enc, pend, &av_pageUrl, &r->Link.pageUrl);
    if (!enc)
      return FALSE;
  }
    }
  if (r->m_fEncoding != 0.0 || r->m_bSendEncoding)
    { /* AMF0, AMF3 not fully supported yet */
      enc = AMF_EncodeNamedNumber(enc, pend, &av_objectEncoding, r->m_fEncoding);
      if (!enc)
  return FALSE;
    }
  if (enc + 3 >= pend)
    return FALSE;
  *enc++ = 0;
  *enc++ = 0;     /* end of object - 0x00 0x00 0x09 */
  *enc++ = AMF_OBJECT_END;


  /* add auth string */
  if (r->Link.auth.av_len)
    {
      enc = AMF_EncodeBoolean(enc, pend, r->Link.lFlags & RTMP_LF_AUTH);
      if (!enc)
  return FALSE;
      enc = AMF_EncodeString(enc, pend, &r->Link.auth);
      if (!enc)
  return FALSE;
    }
  if (r->Link.extras.o_num)
    {
      int i;
      for (i = 0; i < r->Link.extras.o_num; i++)
  {
    enc = AMFProp_Encode(&r->Link.extras.o_props[i], enc, pend);
    if (!enc)
      return FALSE;
  }
    }
  packet.m_nBodySize = enc - packet.m_body;
  //----------------
  r->dlg->AppendMLInfo(20,1,"命令消息","Connect");
  //-----------------------------
  return RTMP_SendPacket(r, &packet, TRUE);
}
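顺便看一下connect命令消息体编码完成之后大致的字节布局(仅为示意,app等字段的具体取值取决于之前URL解析的结果,这里用“vod”作为例子):

  //示意:connect消息体(AMF0编码)开头部分的字节布局
  unsigned char connect_body_demo[] = {
    0x02, 0x00, 0x07, 'c','o','n','n','e','c','t',          /* AMF0字符串"connect" */
    0x00, 0x3F, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,   /* AMF0数字1.0,即事务ID */
    0x03,                                                    /* AMF0对象开始 */
    0x00, 0x03, 'a','p','p',  0x02, 0x00, 0x03, 'v','o','d',/* 命名属性 app:"vod"(示例值) */
    /* ...其余属性(flashVer、tcUrl、audioCodecs等)省略... */
    0x00, 0x00, 0x09                                         /* 对象结束标记 */
  };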


RTMP_SendCreateStream()命令相对而言比较简单,代码如下:
//发送“createstream”命令
int
RTMP_SendCreateStream(RTMP *r)
{
  RTMPPacket packet;
  char pbuf[256], *pend = pbuf + sizeof(pbuf);
  char *enc;


  packet.m_nChannel = 0x03; /* control channel (invoke) */
  packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM;
  packet.m_packetType = 0x14; /* INVOKE */
  packet.m_nTimeStamp = 0;
  packet.m_nInfoField2 = 0;
  packet.m_hasAbsTimestamp = 0;
  packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE;


  enc = packet.m_body;
  enc = AMF_EncodeString(enc, pend, &av_createStream);
  enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes);
  *enc++ = AMF_NULL;    /* NULL */


  packet.m_nBodySize = enc - packet.m_body;
  //----------------
  r->dlg->AppendMLInfo(20,1,"命令消息","CreateStream");
  //-----------------------------
  return RTMP_SendPacket(r, &packet, TRUE);
}


同样,SendReleaseStream()内容也比较简单,我对其中部分内容作了注释:


//发送RealeaseStream命令
static int
SendReleaseStream(RTMP *r)
{
  RTMPPacket packet;
  char pbuf[1024], *pend = pbuf + sizeof(pbuf);
  char *enc;


  packet.m_nChannel = 0x03; /* control channel (invoke) */
  packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM;
  packet.m_packetType = 0x14; /* INVOKE */
  packet.m_nTimeStamp = 0;
  packet.m_nInfoField2 = 0;
  packet.m_hasAbsTimestamp = 0;
  packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE;


 enc = packet.m_body;
  //对“releaseStream”字符串进行AMF编码
  enc = AMF_EncodeString(enc, pend, &av_releaseStream);
  //对事务ID(transaction ID,即++r->m_numInvokes)进行AMF编码
  enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes);
  //命令对象
  *enc++ = AMF_NULL;
  //对播放路径字符串进行AMF编码
  enc = AMF_EncodeString(enc, pend, &r->Link.playpath);
  if (!enc)
    return FALSE;


  packet.m_nBodySize = enc - packet.m_body;
  //----------------
  r->dlg->AppendMLInfo(20,1,"命令消息","ReleaseStream");
  //-----------------------------
  return RTMP_SendPacket(r, &packet, FALSE);
}


再来看一个SendPublish()函数,用于发送“publish”命令


//发送Publish命令
static int
SendPublish(RTMP *r)
{
  RTMPPacket packet;
  char pbuf[1024], *pend = pbuf + sizeof(pbuf);
  char *enc;
  //块流ID为4
  packet.m_nChannel = 0x04; /* source channel (invoke) */
  packet.m_headerType = RTMP_PACKET_SIZE_LARGE;
  //命令消息,类型20
  packet.m_packetType = 0x14; /* INVOKE */
  packet.m_nTimeStamp = 0;
  //流ID
  packet.m_nInfoField2 = r->m_stream_id;
  packet.m_hasAbsTimestamp = 0;
  packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE;
  //指向Chunk的负载
  enc = packet.m_body;
   //对“publish”字符串进行AMF编码
  enc = AMF_EncodeString(enc, pend, &av_publish);
  enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes);
  //命令对象为空
  *enc++ = AMF_NULL;
  enc = AMF_EncodeString(enc, pend, &r->Link.playpath);
  if (!enc)
    return FALSE;


  /* FIXME: should we choose live based on Link.lFlags & RTMP_LF_LIVE? */
  enc = AMF_EncodeString(enc, pend, &av_live);
  if (!enc)
    return FALSE;


  packet.m_nBodySize = enc - packet.m_body;
  //----------------
  r->dlg->AppendMLInfo(20,1,"命令消息","Publish");
  //-----------------------------
  return RTMP_SendPacket(r, &packet, TRUE);
}


其他的命令不再一一列举,总体的思路都是:声明一个RTMPPacket类型的结构体,然后设置各种属性值,最后交给RTMP_SendPacket()进行发送。
RTMPPacket类型的结构体定义如下。需要说明的是,一个RTMPPacket实际上对应RTMP协议规范里的一条消息(Message),发送时会被RTMP_SendPacket()按块大小切分成一个或多个块(Chunk)。


//Chunk信息
  typedef struct RTMPPacket
  {
    uint8_t m_headerType;//ChunkMsgHeader的类型(4种)
    uint8_t m_packetType;//Message type ID(1-7协议控制;8,9音视频;10以后为AMF编码消息)
    uint8_t m_hasAbsTimestamp;  /* Timestamp 是绝对值还是相对值? */
    int m_nChannel;     //块流ID
    uint32_t m_nTimeStamp;  // Timestamp
    int32_t m_nInfoField2;  /* last 4 bytes in a long header,消息流ID */
    uint32_t m_nBodySize; //消息长度
    uint32_t m_nBytesRead;
    RTMPChunk *m_chunk;
    char *m_body;
  } RTMPPacket;
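把前面几个Send***()的公共部分抽出来,大致就是下面这个样子(仅为示意,command指向一个用AVC宏定义好的命令名常量):

  //示意:按统一套路发送一条只带命令名和事务ID的命令消息
  static int
  SendSimpleInvoke(RTMP *r, const AVal *command)
  {
    RTMPPacket packet;
    char pbuf[256], *pend = pbuf + sizeof(pbuf);
    char *enc;

    packet.m_nChannel = 0x03;                     //命令消息常用的块流ID
    packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM;
    packet.m_packetType = 0x14;                   //命令消息(AMF0)
    packet.m_nTimeStamp = 0;
    packet.m_nInfoField2 = 0;
    packet.m_hasAbsTimestamp = 0;
    packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE;  //预留Chunk头的空间

    enc = packet.m_body;
    enc = AMF_EncodeString(enc, pend, command);            //命令名
    enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes);  //事务ID
    *enc++ = AMF_NULL;                                      //命令对象为空

    packet.m_nBodySize = enc - packet.m_body;
    return RTMP_SendPacket(r, &packet, TRUE);               //编码成Chunk并发送
  }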


下面我们来看看RTMP_SendPacket()吧,各种的RTMPPacket(即各种Chunk)都需要用这个函数进行发送。
//自己编一个数据报发送出去!
//非常常用
int
RTMP_SendPacket(RTMP *r, RTMPPacket *packet, int queue)
{
  const RTMPPacket *prevPacket = r->m_vecChannelsOut[packet->m_nChannel];
  uint32_t last = 0;
  int nSize;
  int hSize, cSize;
  char *header, *hptr, *hend, hbuf[RTMP_MAX_HEADER_SIZE], c;
  uint32_t t;
  char *buffer, *tbuf = NULL, *toff = NULL;
  int nChunkSize;
  int tlen;
  //不是完整ChunkMsgHeader
  if (prevPacket && packet->m_headerType != RTMP_PACKET_SIZE_LARGE)
    {
      /* compress a bit by using the prev packet's attributes */
  //获取ChunkMsgHeader的类型
  //前一个Chunk和这个Chunk对比
      if (prevPacket->m_nBodySize == packet->m_nBodySize
    && prevPacket->m_packetType == packet->m_packetType
    && packet->m_headerType == RTMP_PACKET_SIZE_MEDIUM)
  packet->m_headerType = RTMP_PACKET_SIZE_SMALL;




      if (prevPacket->m_nTimeStamp == packet->m_nTimeStamp
    && packet->m_headerType == RTMP_PACKET_SIZE_SMALL)
  packet->m_headerType = RTMP_PACKET_SIZE_MINIMUM;
    //上一个packet的TimeStamp
      last = prevPacket->m_nTimeStamp;
    }
  
  if (packet->m_headerType > 3) /* sanity */
    {
      RTMP_Log(RTMP_LOGERROR, "sanity failed!! trying to send header of type: 0x%02x.",
    (unsigned char)packet->m_headerType);
      return FALSE;
    }
  //chunk包头大小;packetSize[] = { 12, 8, 4, 1 }
  nSize = packetSize[packet->m_headerType];
  hSize = nSize; cSize = 0;
  //相对的TimeStamp
  t = packet->m_nTimeStamp - last;


  if (packet->m_body)
    {
  //Header的Start
  //m_body是指向负载数据首地址的指针;“-”号用于指针前移
      header = packet->m_body - nSize;
  //Header的End
      hend = packet->m_body;
    }
  else
    {
      header = hbuf + 6;
      hend = hbuf + sizeof(hbuf);
    }
  //当ChunkStreamID大于319时
  if (packet->m_nChannel > 319)
  //ChunkBasicHeader是3个字节
    cSize = 2;
  //当ChunkStreamID大于63时
  else if (packet->m_nChannel > 63)
  //ChunkBasicHeader是2个字节
    cSize = 1;
  if (cSize)
    {
  //header指针指向ChunkMsgHeader
      header -= cSize;
  //hsize加上ChunkBasicHeader的长度
      hSize += cSize;
    }
  //相对TimeStamp大于0xffffff,此时需要使用ExtendTimeStamp
  if (nSize > 1 && t >= 0xffffff)
    {
      header -= 4;
      hSize += 4;
    }


  hptr = header;
  //把ChunkBasicHeader的Fmt类型左移6位
  c = packet->m_headerType << 6;
  switch (cSize)
    {
  //把ChunkBasicHeader的低6位设置成ChunkStreamID
    case 0:
      c |= packet->m_nChannel;
      break;
  //同理,但低6位设置成000000
    case 1:
      break;
  //同理,但低6位设置成000001
    case 2:
      c |= 1;
      break;
    }
  //可以拆分成两句*hptr=c;hptr++,此时hptr指向第2个字节
  *hptr++ = c;
  //CSize>0,即ChunkBasicHeader大于1字节
  if (cSize)
    {
  //将要放到第2字节的内容tmp
      int tmp = packet->m_nChannel - 64;
  //获取低位存储与第2字节
      *hptr++ = tmp & 0xff;
  //ChunkBasicHeader是最大的3字节时
      if (cSize == 2)
  //获取高位存储于最后1个字节(注意:排序使用大端序列,和主机相反)
  *hptr++ = tmp >> 8;
    }
  //ChunkMsgHeader。注意一共有4种,包含的字段数不同。
  //TimeStamp(3B)
  if (nSize > 1)
    {
  //相对TimeStamp和绝对TimeStamp?
      hptr = AMF_EncodeInt24(hptr, hend, t > 0xffffff ? 0xffffff : t);
    }
  //MessageLength+MessageTypeID(4B)
  if (nSize > 4)
    {
  //MessageLength
      hptr = AMF_EncodeInt24(hptr, hend, packet->m_nBodySize);
  //MessageTypeID
      *hptr++ = packet->m_packetType;
    }
  //MessageStreamID(4B)
  if (nSize > 8)
    hptr += EncodeInt32LE(hptr, packet->m_nInfoField2);
  
  //ExtendedTimeStamp
  if (nSize > 1 && t >= 0xffffff)
    hptr = AMF_EncodeInt32(hptr, hend, t);
  //负载长度,指向负载的指针
  nSize = packet->m_nBodySize;
  buffer = packet->m_body;
  //Chunk大小,默认128字节
  nChunkSize = r->m_outChunkSize;


  RTMP_Log(RTMP_LOGDEBUG2, "%s: fd=%d, size=%d", __FUNCTION__, r->m_sb.sb_socket,
      nSize);
  /* send all chunks in one HTTP request */
  //使用HTTP
  if (r->Link.protocol & RTMP_FEATURE_HTTP)
    {
  //nSize:Message负载长度;nChunkSize:Chunk长度;
  //例nSize:307,nChunkSize:128;
  //可分为(307+128-1)/128=3个
  //为什么+nChunkSize-1?因为除法会只取整数部分!
      int chunks = (nSize+nChunkSize-1) / nChunkSize;
  //Chunk个数超过一个
      if (chunks > 1)
        {
  //注意:CSize=1表示ChunkBasicHeader是2字节
  //消息分n块后总的开销:
  //n个ChunkBasicHeader,1个ChunkMsgHeader,1个Message负载
  //实际中只有第一个Chunk是完整的,剩下的只有ChunkBasicHeader
    tlen = chunks * (cSize + 1) + nSize + hSize;
  //分配内存
    tbuf = (char *) malloc(tlen);
    if (!tbuf)
      return FALSE;
    toff = tbuf;
  }
  //消息的负载+头
    }
  while (nSize + hSize)
    {
      int wrote;
    //消息负载
      if (nSize < nChunkSize)
  //Chunk可能小于设定值
  nChunkSize = nSize;


      RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)header, hSize);
      RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)buffer, nChunkSize);
      if (tbuf)
        {
  //void *memcpy(void *dest, const void *src, int n);
  //由src指向地址为起始地址的连续n个字节的数据复制到以dest指向地址为起始地址的空间内
    memcpy(toff, header, nChunkSize + hSize);
    toff += nChunkSize + hSize;
  }
      else
        {
    wrote = WriteN(r, header, nChunkSize + hSize);
    if (!wrote)
      return FALSE;
  }
    //消息负载长度-Chunk负载长度
      nSize -= nChunkSize;
    //Buffer指针前移1个Chunk负载长度
      buffer += nChunkSize;
      hSize = 0;
    
    //如果消息没有发完
      if (nSize > 0)
  {
  //ChunkBasicHeader
    header = buffer - 1;
    hSize = 1;
    if (cSize)
      {
        header -= cSize;
        hSize += cSize;
      }
    //ChunkBasicHeader第1个字节
    *header = (0xc0 | c);
    //ChunkBasicHeader大于1字节
    if (cSize)
      {
        int tmp = packet->m_nChannel - 64;
        header[1] = tmp & 0xff;
        if (cSize == 2)
    header[2] = tmp >> 8;
      }
  }
    }
  if (tbuf)
    {
  //
      int wrote = WriteN(r, tbuf, toff-tbuf);
      free(tbuf);
      tbuf = NULL;
      if (!wrote)
        return FALSE;
    }


  /* we invoked a remote method */
  if (packet->m_packetType == 0x14)
    {
      AVal method;
      char *ptr;
      ptr = packet->m_body + 1;
      AMF_DecodeString(ptr, &method);
      RTMP_Log(RTMP_LOGDEBUG, "Invoking %s", method.av_val);
      /* keep it in call queue till result arrives */
      if (queue) {
        int txn;
        ptr += 3 + method.av_len;
        txn = (int)AMF_DecodeNumber(ptr);
  AV_queue(&r->m_methodCalls, &r->m_numCalls, &method, txn);
      }
    }


  if (!r->m_vecChannelsOut[packet->m_nChannel])
    r->m_vecChannelsOut[packet->m_nChannel] = (RTMPPacket *) malloc(sizeof(RTMPPacket));
  memcpy(r->m_vecChannelsOut[packet->m_nChannel], packet, sizeof(RTMPPacket));
  return TRUE;
}
这个函数乍一看好像非常复杂,其实不然,它只是按照RTMP规范把一条消息(Message)切分并编码成符合规范的块(Chunk),具体的字段含义可以参考相关的协议文档,这里不再逐字段分析。下面用一个具体的例子说明切分之后发出去的数据是什么样子。
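例如(沿用代码注释里的数字,仅为示意):一条消息的负载是307字节,块大小(m_outChunkSize)取默认的128字节,块流ID为3,使用完整的12字节块首部(1字节BasicHeader+11字节MsgHeader),那么实际写到Socket上的数据大致是:

  [BasicHeader 0x03(fmt=0,csid=3)] + [11字节MsgHeader] + [128字节负载]
  [BasicHeader 0xC3(fmt=3,csid=3)] + [128字节负载]
  [BasicHeader 0xC3(fmt=3,csid=3)] + [51字节负载]

也就是代码里说的“只有第一个Chunk是完整的,剩下的只有ChunkBasicHeader”:后续Chunk的首字节正是上面那句 *header = (0xc0 | c)。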


另外需要注意一个函数:WriteN(),它完成了把编码好的数据真正发送出去的功能。来看一下WriteN()函数:


//发送数据报的时候调用(连接,buffer,长度)
static int
WriteN(RTMP *r, const char *buffer, int n)
{
  const char *ptr = buffer;
#ifdef CRYPTO
  char *encrypted = 0;
  char buf[RTMP_BUFFER_CACHE_SIZE];


  if (r->Link.rc4keyOut)
    {
      if (n > sizeof(buf))
  encrypted = (char *)malloc(n);
      else
  encrypted = (char *)buf;
      ptr = encrypted;
      RC4_encrypt2((RC4_KEY *)r->Link.rc4keyOut, n, buffer, ptr);
    }
#endif


  while (n > 0)
    {
      int nBytes;
    //因方式的不同而调用不同函数
    //如果使用的是HTTP协议进行连接
      if (r->Link.protocol & RTMP_FEATURE_HTTP)
        nBytes = HTTP_Post(r, RTMPT_SEND, ptr, n);
      else
        nBytes = RTMPSockBuf_Send(&r->m_sb, ptr, n);
      /*RTMP_Log(RTMP_LOGDEBUG, "%s: %d\n", __FUNCTION__, nBytes); */
    //成功发送字节数<0
      if (nBytes < 0)
  {
    int sockerr = GetSockError();
    RTMP_Log(RTMP_LOGERROR, "%s, RTMP send error %d (%d bytes)", __FUNCTION__,
        sockerr, n);


    if (sockerr == EINTR && !RTMP_ctrlC)
      continue;


    RTMP_Close(r);
    n = 1;
    break;
  }


      if (nBytes == 0)
  break;


      n -= nBytes;
      ptr += nBytes;
    }


#ifdef CRYPTO
  if (encrypted && encrypted != buf)
    free(encrypted);
#endif


  return n == 0;
}


该函数中,RTMPSockBuf_Send()完成了数据发送的功能,再来看看这个函数(函数调用真是好多啊。。。。)
//Socket发送(指明套接字,buffer缓冲区,数据长度)
//返回所发数据量
int
RTMPSockBuf_Send(RTMPSockBuf *sb, const char *buf, int len)
{
  int rc;


#ifdef _DEBUG
  fwrite(buf, 1, len, netstackdump);
#endif


#if defined(CRYPTO) && !defined(NO_SSL)
  if (sb->sb_ssl)
    {
      rc = TLS_write((SSL *)sb->sb_ssl, buf, len);
    }
  else
#endif
    {
  //向一个已连接的套接口发送数据。
  //int send( SOCKET s, const char * buf, int len, int flags);
  //s:一个用于标识已连接套接口的描述字。
  //buf:包含待发送数据的缓冲区。   
  //len:缓冲区中数据的长度。
  //flags:调用执行方式。
  //rc:所发数据量。
      rc = send(sb->sb_socket, buf, len, 0);
    }
  return rc;
}


int
RTMPSockBuf_Close(RTMPSockBuf *sb)
{
#if defined(CRYPTO) && !defined(NO_SSL)
  if (sb->sb_ssl)
    {
      TLS_shutdown((SSL *)sb->sb_ssl);
      TLS_close((SSL *)sb->sb_ssl);
      sb->sb_ssl = NULL;
    }
#endif
  return closesocket(sb->sb_socket);
}
到这个函数的时候,发现一层层的调用终于完成了,最后调用了系统Socket的send()函数完成了数据的发送功能。
之前贴过一张图总结这个过程,可能理解起来要方便一些:RTMPDump源代码分析 0: 主要函数调用分析




前一篇文章分析了RTMPdump(libRTMP) 的发送消息(Message)方面的源代码:RTMPdump(libRTMP) 源代码分析 8: 发送消息(Message)


在这里在研究研究接收消息(Message)的源代码,接收消息最典型的应用就是接收视音频数据了,
因为视频和音频分别都属于RTMP协议规范中的一种消息。在这里主要分析接收视音频数据。


RTMPdump中完成视音频数据的接收(也可以说是视音频数据的下载)的函数是:RTMP_Read()。
RTMPdump主程序中的Download()函数就是通过调用RTMP_Read()完成数据接收,从而实现下载的。


那么我们马上开始吧,首先看看RTMP_Read()函数:
//FLV文件头
static const char flvHeader[] = { 'F', 'L', 'V', 0x01,
  0x00,       /* 0x04代表有音频, 0x01代表有视频 */
  0x00, 0x00, 0x00, 0x09,
  0x00, 0x00, 0x00, 0x00
};


#define HEADERBUF (128*1024)
int
RTMP_Read(RTMP *r, char *buf, int size)
{
  int nRead = 0, total = 0;


  /* can't continue */
fail:
  switch (r->m_read.status) {
  case RTMP_READ_EOF:
  case RTMP_READ_COMPLETE:
    return 0;
  case RTMP_READ_ERROR:  /* corrupted stream, resume failed */
    SetSockError(EINVAL);
    return -1;
  default:
    break;
  }


  /* first time thru */
  if (!(r->m_read.flags & RTMP_READ_HEADER))
  {
    if (!(r->m_read.flags & RTMP_READ_RESUME))
    {
      //分配内存,指向buf的首部和尾部
      char *mybuf = (char *) malloc(HEADERBUF), *end = mybuf + HEADERBUF;
      int cnt = 0;
      //buf指向同一地址
      r->m_read.buf = mybuf;
      r->m_read.buflen = HEADERBUF;
  
      //把Flv的首部复制到mybuf指向的内存
      //RTMP传递的多媒体数据是“砍头”的FLV文件
      memcpy(mybuf, flvHeader, sizeof(flvHeader));
      //m_read.buf指针后移flvheader个单位
      r->m_read.buf += sizeof(flvHeader);
      //buf剩余长度减去flvheader的长度
      r->m_read.buflen -= sizeof(flvHeader);
      //timestamp=0,不是多媒体数据
      while (r->m_read.timestamp == 0)
      {
        //读取一个Packet,到r->m_read.buf
        //nRead为读取结果标记
        nRead = Read_1_Packet(r, r->m_read.buf, r->m_read.buflen);
        //有错误
        if (nRead < 0)
        {
          free(mybuf);
          r->m_read.buf = NULL;
          r->m_read.buflen = 0;
          r->m_read.status = nRead;
          goto fail;
        }
        /* buffer overflow, fix buffer and give up */
        if (r->m_read.buf < mybuf || r->m_read.buf > end) {
          mybuf = (char *) realloc(mybuf, cnt + nRead);
          memcpy(mybuf+cnt, r->m_read.buf, nRead);
          r->m_read.buf = mybuf+cnt+nRead;
          break;
        }
        //
        //记录读取的字节数
        cnt += nRead;
        //m_read.buf指针后移nRead个单位
        r->m_read.buf += nRead;
        r->m_read.buflen -= nRead;
        //当dataType=00000101时,即有视频和音频时
        //说明有多媒体数据了
        if (r->m_read.dataType == 5)
          break;
      }
      //读入数据类型
      //注意:mybuf指针位置一直没动
      //mybuf[4]中第 6 位表示是否存在音频Tag。第 8 位表示是否存在视频Tag。 
      mybuf[4] = r->m_read.dataType;
      //两个指针之间的差
      r->m_read.buflen = r->m_read.buf - mybuf;
      r->m_read.buf = mybuf;
      //这句很重要!后面memcpy拷贝数据时会用到bufpos
      r->m_read.bufpos = mybuf;
    }
    //flags标明已经读完了文件头
    r->m_read.flags |= RTMP_READ_HEADER;
  }


  if ((r->m_read.flags & RTMP_READ_SEEKING) && r->m_read.buf)
  {
      /* drop whatever's here */
      free(r->m_read.buf);
      r->m_read.buf = NULL;
      r->m_read.bufpos = NULL;
      r->m_read.buflen = 0;
  }


  /* If there's leftover data buffered, use it up */
  if (r->m_read.buf)
  {
      nRead = r->m_read.buflen;
      if (nRead > size)
        nRead = size;
      //m_read.bufpos指向mybuf
      memcpy(buf, r->m_read.bufpos, nRead);
      r->m_read.buflen -= nRead;
      if (!r->m_read.buflen)
      {
        free(r->m_read.buf);
        r->m_read.buf = NULL;
        r->m_read.bufpos = NULL;
      }
      else
      {
        r->m_read.bufpos += nRead;
      }
      buf += nRead;
      total += nRead;
      size -= nRead;
    }
    //接着读
    while (size > 0 && (nRead = Read_1_Packet(r, buf, size)) >= 0)
    {
      if (!nRead) continue;
      buf += nRead;
      total += nRead;
      size -= nRead;
      break;
    }
    if (nRead < 0)
      r->m_read.status = nRead;


    if (size < 0)
      total += size;
    return total;
}


程序关键的地方都已经注释上了代码,在此就不重复说明了。
有一点要提一下:RTMP传送的视音频数据的格式和FLV(FLash Video)格式是一样的,把接收下来的数据直接存入文件就可以了。
但是这些视音频数据没有文件头,是纯视音频数据,因此需要在其前面加上FLV格式的文件头,
这样得到的数据存成文件后才能被一般的视频播放器所播放。FLV格式的文件头是13个字节,如代码中所示。
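顺便给出一个用RTMP_Read()把流下载成FLV文件的最简化用法(仅为示意,假设r已经通过RTMP_Connect()和RTMP_ConnectStream()建立好连接,缓冲区大小等数值是随意取的,错误处理从简):

  //示意:循环调用RTMP_Read(),把收到的数据直接写成FLV文件
  char buf[64 * 1024];
  FILE *fp = fopen("out.flv", "wb");
  int nRead;
  //RTMP_Read()内部已经补好了13字节的FLV文件头,直接写文件即可
  while ((nRead = RTMP_Read(r, buf, sizeof(buf))) > 0)
  {
    fwrite(buf, 1, nRead, fp);
  }
  fclose(fp);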


RTMP_Read()中实际读取数据的函数是Read_1_Packet(),它的功能是从网络上读取一个RTMPPacket的数据,来看看它的源代码吧:
/* 从流媒体中读取多媒体packet。
 * Returns -3 if Play.Close/Stop, -2 if fatal error, -1 if no more media
 * packets, 0 if ignorable error, >0 if there is a media packet
 */
static int
Read_1_Packet(RTMP *r, char *buf, unsigned int buflen)
{
  uint32_t prevTagSize = 0;
  int rtnGetNextMediaPacket = 0, ret = RTMP_READ_EOF;
  RTMPPacket packet = { 0 };
  int recopy = FALSE;
  unsigned int size;
  char *ptr, *pend;
  uint32_t nTimeStamp = 0;
  unsigned int len;
  //获取下一个packet
  rtnGetNextMediaPacket = RTMP_GetNextMediaPacket(r, &packet);
  while (rtnGetNextMediaPacket)
  {
      char *packetBody = packet.m_body;
      unsigned int nPacketLen = packet.m_nBodySize;


      /* Return -3 if this was completed nicely with invoke message
       * Play.Stop or Play.Complete
       */
    if (rtnGetNextMediaPacket == 2)
    {
      RTMP_Log(RTMP_LOGDEBUG,
        "Got Play.Complete or Play.Stop from server. "
        "Assuming stream is complete");
      ret = RTMP_READ_COMPLETE;
      break;
    }
    //设置dataType
    r->m_read.dataType |= (((packet.m_packetType == 0x08) << 2) |
           (packet.m_packetType == 0x09));
    //MessageID为9时,为视频数据,数据太小时。。。
    if (packet.m_packetType == 0x09 && nPacketLen <= 5)
    {
      RTMP_Log(RTMP_LOGDEBUG, "ignoring too small video packet: size: %d",
        nPacketLen);
      ret = RTMP_READ_IGNORE;
      break;
    }
    //MessageID为8时,为音频数据,数据太小时。。。
    if (packet.m_packetType == 0x08 && nPacketLen <= 1)
    {
      RTMP_Log(RTMP_LOGDEBUG, "ignoring too small audio packet: size: %d",
        nPacketLen);
      ret = RTMP_READ_IGNORE;
      break;
    }


    if (r->m_read.flags & RTMP_READ_SEEKING)
    {
      ret = RTMP_READ_IGNORE;
      break;
    }
#ifdef _DEBUG
      RTMP_Log(RTMP_LOGDEBUG, "type: %02X, size: %d, TS: %d ms, abs TS: %d",
    packet.m_packetType, nPacketLen, packet.m_nTimeStamp,
    packet.m_hasAbsTimestamp);
      if (packet.m_packetType == 0x09)
  RTMP_Log(RTMP_LOGDEBUG, "frametype: %02X", (*packetBody & 0xf0));
#endif


    if (r->m_read.flags & RTMP_READ_RESUME)
    {
    /* check the header if we get one */
    //此类packet的timestamp都是0
    if (packet.m_nTimeStamp == 0)
      {
    //messageID=18,数据消息(AMF0)
        if (r->m_read.nMetaHeaderSize > 0
      && packet.m_packetType == 0x12)
    {
    //获取metadata
      AMFObject metaObj;
      int nRes =
        AMF_Decode(&metaObj, packetBody, nPacketLen, FALSE);
      if (nRes >= 0)
        {
          AVal metastring;
          AMFProp_GetString(AMF_GetProp(&metaObj, NULL, 0),
          &metastring);


          if (AVMATCH(&metastring, &av_onMetaData))
      {
        /* compare */
        if ((r->m_read.nMetaHeaderSize != nPacketLen) ||
            (memcmp
             (r->m_read.metaHeader, packetBody,
        r->m_read.nMetaHeaderSize) != 0))
          {
            ret = RTMP_READ_ERROR;
          }
      }
          AMF_Reset(&metaObj);
          if (ret == RTMP_READ_ERROR)
      break;
        }
    }


        /* check first keyframe to make sure we got the right position
         * in the stream! (the first non ignored frame)
         */
        if (r->m_read.nInitialFrameSize > 0)
    {
      /* video or audio data */
      if (packet.m_packetType == r->m_read.initialFrameType
          && r->m_read.nInitialFrameSize == nPacketLen)
        {
          /* we don't compare the sizes since the packet can
           * contain several FLV packets, just make sure the
           * first frame is our keyframe (which we are going
           * to rewrite)
           */
          if (memcmp
        (r->m_read.initialFrame, packetBody,
         r->m_read.nInitialFrameSize) == 0)
      {
        RTMP_Log(RTMP_LOGDEBUG, "Checked keyframe successfully!");
        r->m_read.flags |= RTMP_READ_GOTKF;
        /* ignore it! (what about audio data after it? it is
         * handled by ignoring all 0ms frames, see below)
         */
        ret = RTMP_READ_IGNORE;
        break;
      }
        }


      /* hande FLV streams, even though the server resends the
       * keyframe as an extra video packet it is also included
       * in the first FLV stream chunk and we have to compare
       * it and filter it out !!
       */
      //MessageID=22,聚合消息
      if (packet.m_packetType == 0x16)
        {
          /* basically we have to find the keyframe with the
           * correct TS being nResumeTS
           */
          unsigned int pos = 0;
          uint32_t ts = 0;


          while (pos + 11 < nPacketLen)
      {
        /* size without header (11) and prevTagSize (4) */
        uint32_t dataSize =
          AMF_DecodeInt24(packetBody + pos + 1);
        ts = AMF_DecodeInt24(packetBody + pos + 4);
        ts |= (packetBody[pos + 7] << 24);


#ifdef _DEBUG
        RTMP_Log(RTMP_LOGDEBUG,
            "keyframe search: FLV Packet: type %02X, dataSize: %d, timeStamp: %d ms",
            packetBody[pos], dataSize, ts);
#endif
        /* ok, is it a keyframe?:
         * well doesn't work for audio!
         */
        if (packetBody[pos /*6928, test 0 */ ] ==
            r->m_read.initialFrameType
            /* && (packetBody[11]&0xf0) == 0x10 */ )
          {
            if (ts == r->m_read.nResumeTS)
        {
          RTMP_Log(RTMP_LOGDEBUG,
              "Found keyframe with resume-keyframe timestamp!");
          if (r->m_read.nInitialFrameSize != dataSize
              || memcmp(r->m_read.initialFrame,
            packetBody + pos + 11,
            r->m_read.
            nInitialFrameSize) != 0)
            {
              RTMP_Log(RTMP_LOGERROR,
            "FLV Stream: Keyframe doesn't match!");
              ret = RTMP_READ_ERROR;
              break;
            }
          r->m_read.flags |= RTMP_READ_GOTFLVK;


          /* skip this packet?
           * check whether skippable:
           */
          if (pos + 11 + dataSize + 4 > nPacketLen)
            {
              RTMP_Log(RTMP_LOGWARNING,
            "Non skipable packet since it doesn't end with chunk, stream corrupt!");
              ret = RTMP_READ_ERROR;
              break;
            }
          packetBody += (pos + 11 + dataSize + 4);
          nPacketLen -= (pos + 11 + dataSize + 4);


          goto stopKeyframeSearch;


        }
            else if (r->m_read.nResumeTS < ts)
        {
          /* the timestamp ts will only increase with
           * further packets, wait for seek
           */
          goto stopKeyframeSearch;
        }
          }
        pos += (11 + dataSize + 4);
      }
          if (ts < r->m_read.nResumeTS)
      {
        RTMP_Log(RTMP_LOGERROR,
            "First packet does not contain keyframe, all "
            "timestamps are smaller than the keyframe "
            "timestamp; probably the resume seek failed?");
      }
        stopKeyframeSearch:
          ;
          if (!(r->m_read.flags & RTMP_READ_GOTFLVK))
      {
        RTMP_Log(RTMP_LOGERROR,
            "Couldn't find the seeked keyframe in this chunk!");
        ret = RTMP_READ_IGNORE;
        break;
      }
        }
    }
      }


    if (packet.m_nTimeStamp > 0
        && (r->m_read.flags & (RTMP_READ_GOTKF|RTMP_READ_GOTFLVK)))
      {
        /* another problem is that the server can actually change from
         * 09/08 video/audio packets to an FLV stream or vice versa and
         * our keyframe check will prevent us from going along with the
         * new stream if we resumed.
         *
         * in this case set the 'found keyframe' variables to true.
         * We assume that if we found one keyframe somewhere and were
         * already beyond TS > 0 we have written data to the output
         * which means we can accept all forthcoming data including the
         * change between 08/09 <-> FLV packets
         */
        r->m_read.flags |= (RTMP_READ_GOTKF|RTMP_READ_GOTFLVK);
      }


    /* skip till we find our keyframe
     * (seeking might put us somewhere before it)
     */
    if (!(r->m_read.flags & RTMP_READ_GOTKF) &&
      packet.m_packetType != 0x16)
      {
        RTMP_Log(RTMP_LOGWARNING,
      "Stream does not start with requested frame, ignoring data... ");
        r->m_read.nIgnoredFrameCounter++;
        if (r->m_read.nIgnoredFrameCounter > MAX_IGNORED_FRAMES)
    ret = RTMP_READ_ERROR;  /* fatal error, couldn't continue stream */
        else
    ret = RTMP_READ_IGNORE;
        break;
      }
    /* ok, do the same for FLV streams */
    if (!(r->m_read.flags & RTMP_READ_GOTFLVK) &&
      packet.m_packetType == 0x16)
      {
        RTMP_Log(RTMP_LOGWARNING,
      "Stream does not start with requested FLV frame, ignoring data... ");
        r->m_read.nIgnoredFlvFrameCounter++;
        if (r->m_read.nIgnoredFlvFrameCounter > MAX_IGNORED_FRAMES)
    ret = RTMP_READ_ERROR;
        else
    ret = RTMP_READ_IGNORE;
        break;
      }


    /* we have to ignore the 0ms frames since these are the first
     * keyframes; we've got these so don't mess around with multiple
     * copies sent by the server to us! (if the keyframe is found at a
     * later position there is only one copy and it will be ignored by
     * the preceding if clause)
     */
    if (!(r->m_read.flags & RTMP_READ_NO_IGNORE) &&
      packet.m_packetType != 0x16)
      {     /* exclude type 0x16 (FLV) since it can
         * contain several FLV packets */
        if (packet.m_nTimeStamp == 0)
    {
      ret = RTMP_READ_IGNORE;
      break;
    }
        else
    {
      /* stop ignoring packets */
      r->m_read.flags |= RTMP_READ_NO_IGNORE;
    }
      }
  }


      /* calculate packet size and allocate slop buffer if necessary */
      size = nPacketLen +
  ((packet.m_packetType == 0x08 || packet.m_packetType == 0x09
    || packet.m_packetType == 0x12) ? 11 : 0) +
  (packet.m_packetType != 0x16 ? 4 : 0);


      if (size + 4 > buflen)
  {
    /* the extra 4 is for the case of an FLV stream without a last
     * prevTagSize (we need extra 4 bytes to append it) */
    r->m_read.buf = (char *) malloc(size + 4);
    if (r->m_read.buf == 0)
      {
        RTMP_Log(RTMP_LOGERROR, "Couldn't allocate memory!");
        ret = RTMP_READ_ERROR;    /* fatal error */
        break;
      }
    recopy = TRUE;
    ptr = r->m_read.buf;
  }
      else
  {
    ptr = buf;
  }
      pend = ptr + size + 4;


      /* use to return timestamp of last processed packet */


      /* audio (0x08), video (0x09) or metadata (0x12) packets :
       * construct 11 byte header then add rtmp packet's data */
      if (packet.m_packetType == 0x08 || packet.m_packetType == 0x09
    || packet.m_packetType == 0x12)
  {
    nTimeStamp = r->m_read.nResumeTS + packet.m_nTimeStamp;
    prevTagSize = 11 + nPacketLen;


    *ptr = packet.m_packetType;
    ptr++;
    ptr = AMF_EncodeInt24(ptr, pend, nPacketLen);


#if 0
      if(packet.m_packetType == 0x09) { /* video */


       /* H264 fix: */
       if((packetBody[0] & 0x0f) == 7) { /* CodecId = H264 */
       uint8_t packetType = *(packetBody+1);


       uint32_t ts = AMF_DecodeInt24(packetBody+2); /* composition time */
       int32_t cts = (ts+0xff800000)^0xff800000;
       RTMP_Log(RTMP_LOGDEBUG, "cts  : %d\n", cts);


       nTimeStamp -= cts;
       /* get rid of the composition time */
       CRTMP::EncodeInt24(packetBody+2, 0);
       }
       RTMP_Log(RTMP_LOGDEBUG, "VIDEO: nTimeStamp: 0x%08X (%d)\n", nTimeStamp, nTimeStamp);
       }
#endif


    ptr = AMF_EncodeInt24(ptr, pend, nTimeStamp);
    *ptr = (char)((nTimeStamp & 0xFF000000) >> 24);
    ptr++;


    /* stream id */
    ptr = AMF_EncodeInt24(ptr, pend, 0);
  }


      memcpy(ptr, packetBody, nPacketLen);
      len = nPacketLen;


      /* correct tagSize and obtain timestamp if we have an FLV stream */
      if (packet.m_packetType == 0x16)
  {
    unsigned int pos = 0;
    int delta;


    /* grab first timestamp and see if it needs fixing */
//    nTimeStamp = AMF_DecodeInt24(packetBody + 4);
  //  nTimeStamp |= (packetBody[7] << 24);
//    delta = packet.m_nTimeStamp - nTimeStamp;


    while (pos + 11 < nPacketLen)
      {
        /* size without header (11) and without prevTagSize (4) */
        uint32_t dataSize = AMF_DecodeInt24(packetBody + pos + 1);
        nTimeStamp = AMF_DecodeInt24(packetBody + pos + 4);
        nTimeStamp |= (packetBody[pos + 7] << 24);


//        if (delta)
//    {
//      nTimeStamp += delta;
//      AMF_EncodeInt24(ptr+pos+4, pend, nTimeStamp);
//      ptr[pos+7] = nTimeStamp>>24;
//    }


        /* set data type */
        r->m_read.dataType |= (((*(packetBody + pos) == 0x08) << 2) |
             (*(packetBody + pos) == 0x09));


        if (pos + 11 + dataSize + 4 > nPacketLen)
    {
      if (pos + 11 + dataSize > nPacketLen)
        {
          RTMP_Log(RTMP_LOGERROR,
        "Wrong data size (%lu), stream corrupted, aborting!",
        dataSize);
          ret = RTMP_READ_ERROR;
          break;
        }
      RTMP_Log(RTMP_LOGWARNING, "No tagSize found, appending!");


      /* we have to append a last tagSize! */
      prevTagSize = dataSize + 11;
      AMF_EncodeInt32(ptr + pos + 11 + dataSize, pend,
          prevTagSize);
      size += 4;
      len += 4;
    }
        else
    {
      prevTagSize =
        AMF_DecodeInt32(packetBody + pos + 11 + dataSize);


#ifdef _DEBUG
      RTMP_Log(RTMP_LOGDEBUG,
          "FLV Packet: type %02X, dataSize: %lu, tagSize: %lu, timeStamp: %lu ms",
          (unsigned char)packetBody[pos], dataSize, prevTagSize,
          nTimeStamp);
#endif


      if (prevTagSize != (dataSize + 11))
        {
#ifdef _DEBUG
          RTMP_Log(RTMP_LOGWARNING,
        "Tag and data size are not consitent, writing tag size according to dataSize+11: %d",
        dataSize + 11);
#endif


          prevTagSize = dataSize + 11;
          AMF_EncodeInt32(ptr + pos + 11 + dataSize, pend,
              prevTagSize);
        }
    }


        pos += prevTagSize + 4; /*(11+dataSize+4); */
      }
  }
      ptr += len;


      if (packet.m_packetType != 0x16)
  {
    /* FLV tag packets contain their own prevTagSize */
    AMF_EncodeInt32(ptr, pend, prevTagSize);
  }


      /* In non-live this nTimeStamp can contain an absolute TS.
       * Update ext timestamp with this absolute offset in non-live mode
       * otherwise report the relative one
       */
      /* RTMP_Log(RTMP_LOGDEBUG, "type: %02X, size: %d, pktTS: %dms, TS: %dms, bLiveStream: %d", packet.m_packetType, nPacketLen, packet.m_nTimeStamp, nTimeStamp, r->Link.lFlags & RTMP_LF_LIVE); */
      r->m_read.timestamp = (r->Link.lFlags & RTMP_LF_LIVE) ? packet.m_nTimeStamp : nTimeStamp;


      ret = size;
      break;
    }


  if (rtnGetNextMediaPacket)
    RTMPPacket_Free(&packet);


  if (recopy)
    {
      len = ret > buflen ? buflen : ret;
      memcpy(buf, r->m_read.buf, len);
      r->m_read.bufpos = r->m_read.buf + len;
      r->m_read.buflen = ret - len;
    }
  return ret;
}
函数功能很多,重要的地方已经加上了注释,在此不再细分析。
Read_1_Packet()里面实现从网络中读取视音频数据的函数是RTMP_GetNextMediaPacket()。
下面我们来看看该函数的源代码:
int
RTMP_GetNextMediaPacket(RTMP *r, RTMPPacket *packet)
{
  int bHasMediaPacket = 0;


  while (!bHasMediaPacket && RTMP_IsConnected(r)
   && RTMP_ReadPacket(r, packet))
    {
      if (!RTMPPacket_IsReady(packet))
  {
    continue;
  }


      bHasMediaPacket = RTMP_ClientPacket(r, packet);


      if (!bHasMediaPacket)
  {
    RTMPPacket_Free(packet);
  }
      else if (r->m_pausing == 3)
  {
    if (packet->m_nTimeStamp <= r->m_mediaStamp)
      {
        bHasMediaPacket = 0;
#ifdef _DEBUG
        RTMP_Log(RTMP_LOGDEBUG,
      "Skipped type: %02X, size: %d, TS: %d ms, abs TS: %d, pause: %d ms",
      packet->m_packetType, packet->m_nBodySize,
      packet->m_nTimeStamp, packet->m_hasAbsTimestamp,
      r->m_mediaStamp);
#endif
        continue;
      }
    r->m_pausing = 0;
  }
    }


  if (bHasMediaPacket)
    r->m_bPlaying = TRUE;
  else if (r->m_sb.sb_timedout && !r->m_pausing)
    r->m_pauseStamp = r->m_channelTimestamp[r->m_mediaChannel];


  return bHasMediaPacket;
}
这里有两个函数比较重要:RTMP_ReadPacket()以及RTMP_ClientPacket()。
这两个函数中,前一个函数负责从网络上读取数据,后一个负责处理数据。
这部分与建立RTMP连接的网络流(NetStream)的时候很相似,
参考:RTMPdump(libRTMP) 源代码分析 6: 建立一个流媒体连接 (NetStream部分 1)
RTMP_ClientPacket()在前文中已经做过分析,在此不再重复叙述。在这里重点分析一下RTMP_ReadPacket(),来看看它的源代码。
//读取收下来的Chunk
int
RTMP_ReadPacket(RTMP *r, RTMPPacket *packet)
{
  //packet 存读取完后的的数据
  //Chunk Header最大值18
  uint8_t hbuf[RTMP_MAX_HEADER_SIZE] = { 0 };
  //header 指向的是从Socket中收下来的数据
  char *header = (char *)hbuf;
  int nSize, hSize, nToRead, nChunk;
  int didAlloc = FALSE;


  RTMP_Log(RTMP_LOGDEBUG2, "%s: fd=%d", __FUNCTION__, r->m_sb.sb_socket);
  //收下来的数据存入hbuf
  if (ReadN(r, (char *)hbuf, 1) == 0)
    {
      RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet header", __FUNCTION__);
      return FALSE;
    }
  //块类型fmt
  packet->m_headerType = (hbuf[0] & 0xc0) >> 6;
  //块流ID(2-63)
  packet->m_nChannel = (hbuf[0] & 0x3f);
  header++;
  //块基本头中的块流ID字段为0时,块流ID占2个字节
  if (packet->m_nChannel == 0)
    {
      if (ReadN(r, (char *)&hbuf[1], 1) != 1)
  {
    RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet header 2nd byte",
        __FUNCTION__);
    return FALSE;
  }
    //计算块流ID(64-319)
      packet->m_nChannel = hbuf[1];
      packet->m_nChannel += 64;
      header++;
    }
  //块基本头中的块流ID字段为1时,块流ID占3个字节
  else if (packet->m_nChannel == 1)
    {
      int tmp;
      if (ReadN(r, (char *)&hbuf[1], 2) != 2)
  {
    RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet header 3nd byte",
        __FUNCTION__);
    return FALSE;
  }
      tmp = (hbuf[2] << 8) + hbuf[1];
    //compute the chunk stream ID (64-65599)
      packet->m_nChannel = tmp + 64;
      RTMP_Log(RTMP_LOGDEBUG, "%s, m_nChannel: %0x", __FUNCTION__, packet->m_nChannel);
      header += 2;
    }
  //size of the chunk message header (one of 4 possible values)
  nSize = packetSize[packet->m_headerType];


  if (nSize == RTMP_LARGE_HEADER_SIZE)  /* if we get a full header the timestamp is absolute */
    packet->m_hasAbsTimestamp = TRUE; //the timestamp in a full 11-byte chunk message header is an absolute value


  else if (nSize < RTMP_LARGE_HEADER_SIZE)
    {       /* using values from the last message of this channel */
      if (r->m_vecChannelsIn[packet->m_nChannel])
  memcpy(packet, r->m_vecChannelsIn[packet->m_nChannel],
         sizeof(RTMPPacket));
    }


  nSize--;


  if (nSize > 0 && ReadN(r, header, nSize) != nSize)
    {
      RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet header. type: %x",
    __FUNCTION__, (unsigned int)hbuf[0]);
      return FALSE;
    }


  hSize = nSize + (header - (char *)hbuf);


  if (nSize >= 3)
    {
  //timestamp (big-endian on the wire, decoded to host order) (present in 11-, 7- and 3-byte headers)
      packet->m_nTimeStamp = AMF_DecodeInt24(header);


      /*RTMP_Log(RTMP_LOGDEBUG, "%s, reading RTMP packet chunk on channel %x, headersz %i, timestamp %i, abs timestamp %i", __FUNCTION__, packet.m_nChannel, nSize, packet.m_nTimeStamp, packet.m_hasAbsTimestamp); */
  //message length (present in 11- and 7-byte headers)
      if (nSize >= 6)
  {
    packet->m_nBodySize = AMF_DecodeInt24(header + 3);
    packet->m_nBytesRead = 0;
    RTMPPacket_Free(packet);
  //(present in 11- and 7-byte headers)
    if (nSize > 6)
      {
      //Msg type ID
        packet->m_packetType = header[6];
      //Msg Stream ID
        if (nSize == 11)
    packet->m_nInfoField2 = DecodeInt32LE(header + 7);
      }
  }
    //extended timestamp
      if (packet->m_nTimeStamp == 0xffffff)
  {
    if (ReadN(r, header + nSize, 4) != 4)
      {
        RTMP_Log(RTMP_LOGERROR, "%s, failed to read extended timestamp",
      __FUNCTION__);
        return FALSE;
      }
    packet->m_nTimeStamp = AMF_DecodeInt32(header + nSize);
    hSize += 4;
  }
    }


  RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)hbuf, hSize);


  if (packet->m_nBodySize > 0 && packet->m_body == NULL)
    {
      if (!RTMPPacket_Alloc(packet, packet->m_nBodySize))
  {
    RTMP_Log(RTMP_LOGDEBUG, "%s, failed to allocate packet", __FUNCTION__);
    return FALSE;
  }
      didAlloc = TRUE;
      packet->m_headerType = (hbuf[0] & 0xc0) >> 6;
    }


  nToRead = packet->m_nBodySize - packet->m_nBytesRead;
  nChunk = r->m_inChunkSize;
  if (nToRead < nChunk)
    nChunk = nToRead;


  /* Does the caller want the raw chunk? */
  if (packet->m_chunk)
    {
      packet->m_chunk->c_headerSize = hSize;
      memcpy(packet->m_chunk->c_header, hbuf, hSize);
      packet->m_chunk->c_chunk = packet->m_body + packet->m_nBytesRead;
      packet->m_chunk->c_chunkSize = nChunk;
    }


  if (ReadN(r, packet->m_body + packet->m_nBytesRead, nChunk) != nChunk)
    {
      RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet body. len: %lu",
    __FUNCTION__, packet->m_nBodySize);
      return FALSE;
    }


  RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)packet->m_body + packet->m_nBytesRead, nChunk);


  packet->m_nBytesRead += nChunk;


  /* keep the packet as ref for other packets on this channel */
  if (!r->m_vecChannelsIn[packet->m_nChannel])
    r->m_vecChannelsIn[packet->m_nChannel] = (RTMPPacket *) malloc(sizeof(RTMPPacket));
  memcpy(r->m_vecChannelsIn[packet->m_nChannel], packet, sizeof(RTMPPacket));
  //check whether the packet has been fully read
  if (RTMPPacket_IsReady(packet))
    {
      /* make packet's timestamp absolute */
      if (!packet->m_hasAbsTimestamp)
  packet->m_nTimeStamp += r->m_channelTimestamp[packet->m_nChannel];  /* timestamps seem to be always relative!! */


      r->m_channelTimestamp[packet->m_nChannel] = packet->m_nTimeStamp;


      /* reset the data from the stored packet. we keep the header since we may use it later if a new packet for this channel */
      /* arrives and requests to re-use some info (small packet header) */
      r->m_vecChannelsIn[packet->m_nChannel]->m_body = NULL;
      r->m_vecChannelsIn[packet->m_nChannel]->m_nBytesRead = 0;
      r->m_vecChannelsIn[packet->m_nChannel]->m_hasAbsTimestamp = FALSE;  /* can only be false if we reuse header */
    }
  else
    {
      packet->m_body = NULL;  /* so it won't be erased on free */
    }


  return TRUE;
}
The function looks long, but it is not really complicated; think of it as simple, repetitive labor (much like laying bricks).
It basically reads the stream byte by byte and parses the fields according to the RTMP specification; refer to the spec for the exact layout.
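The header fields above are pulled apart with AMF_DecodeInt24()/AMF_DecodeInt32() (big-endian, as RTMP puts integers on the wire) and DecodeInt32LE() (little-endian, used only for the message stream ID). Their bodies are not listed in this article; a minimal sketch of what they do, modeled on amf.c/rtmp.c, looks like this:
//big-endian 24-bit integer, e.g. the timestamp and message length fields
unsigned int AMF_DecodeInt24(const char *data)
{
  unsigned char *c = (unsigned char *)data;
  return (c[0] << 16) | (c[1] << 8) | c[2];
}

//big-endian 32-bit integer, e.g. the extended timestamp
unsigned int AMF_DecodeInt32(const char *data)
{
  unsigned char *c = (unsigned char *)data;
  return (c[0] << 24) | (c[1] << 16) | (c[2] << 8) | c[3];
}

//little-endian 32-bit integer: the message stream ID is the one field
//that RTMP stores in little-endian order
static int DecodeInt32LE(const char *data)
{
  unsigned char *c = (unsigned char *)data;
  return (c[3] << 24) | (c[2] << 16) | (c[1] << 8) | c[0];
}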
Inside RTMP_ReadPacket(), the function that actually pulls data off the socket is ReadN(). Let's keep going and look at its source code:
//Read data, over HTTP tunnelling or directly from the socket
static int
ReadN(RTMP *r, char *buffer, int n)
{
  int nOriginalSize = n;
  int avail;
  char *ptr;


  r->m_sb.sb_timedout = FALSE;


#ifdef _DEBUG
  memset(buffer, 0, n);
#endif


  ptr = buffer;
  while (n > 0)
    {
      int nBytes = 0, nRead;
      if (r->Link.protocol & RTMP_FEATURE_HTTP)
        {
    while (!r->m_resplen)
      {
        if (r->m_sb.sb_size < 144)
          {
      if (!r->m_unackd)
        HTTP_Post(r, RTMPT_IDLE, "", 1);
      if (RTMPSockBuf_Fill(&r->m_sb) < 1)
        {
          if (!r->m_sb.sb_timedout)
            RTMP_Close(r);
          return 0;
        }
    }
        HTTP_read(r, 0);
      }
    if (r->m_resplen && !r->m_sb.sb_size)
      RTMPSockBuf_Fill(&r->m_sb);
          avail = r->m_sb.sb_size;
    if (avail > r->m_resplen)
      avail = r->m_resplen;
  }
      else
        {
          avail = r->m_sb.sb_size;
    if (avail == 0)
      {
        if (RTMPSockBuf_Fill(&r->m_sb) < 1)
          {
            if (!r->m_sb.sb_timedout)
              RTMP_Close(r);
            return 0;
    }
        avail = r->m_sb.sb_size;
      }
  }
      nRead = ((n < avail) ? n : avail);
      if (nRead > 0)
  {
    memcpy(ptr, r->m_sb.sb_start, nRead);
    r->m_sb.sb_start += nRead;
    r->m_sb.sb_size -= nRead;
    nBytes = nRead;
    r->m_nBytesIn += nRead;
    if (r->m_bSendCounter
        && r->m_nBytesIn > r->m_nBytesInSent + r->m_nClientBW / 2)
      SendBytesReceived(r);
  }
      /*RTMP_Log(RTMP_LOGDEBUG, "%s: %d bytes\n", __FUNCTION__, nBytes); */
#ifdef _DEBUG
      fwrite(ptr, 1, nBytes, netstackdump_read);
#endif


      if (nBytes == 0)
  {
    RTMP_Log(RTMP_LOGDEBUG, "%s, RTMP socket closed by peer", __FUNCTION__);
    /*goto again; */
    RTMP_Close(r);
    break;
  }


      if (r->Link.protocol & RTMP_FEATURE_HTTP)
  r->m_resplen -= nBytes;


#ifdef CRYPTO
      if (r->Link.rc4keyIn)
  {
    RC4_encrypt((RC4_KEY *)r->Link.rc4keyIn, nBytes, ptr);
  }
#endif


      n -= nBytes;
      ptr += nBytes;
    }


  return nOriginalSize - n;
}
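Also note the SendBytesReceived() call inside ReadN(): once the number of unacknowledged bytes grows past half of the peer bandwidth window (m_nClientBW / 2), the client reports its running byte count back to the server in an Acknowledgement message (type ID 0x03). A sketch of that helper, following the usual libRTMP packet-building pattern (the real function is a static helper in rtmp.c, so details may differ slightly):
static int
SendBytesReceived(RTMP *r)
{
  RTMPPacket packet;
  char pbuf[256], *pend = pbuf + sizeof(pbuf);

  packet.m_nChannel = 0x02;                      /* control channel */
  packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM;
  packet.m_packetType = 0x03;                    /* Acknowledgement (bytes read report) */
  packet.m_nTimeStamp = 0;
  packet.m_nInfoField2 = 0;
  packet.m_hasAbsTimestamp = 0;
  packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE;
  packet.m_nBodySize = 4;

  /* body is a single 32-bit big-endian byte count */
  AMF_EncodeInt32(packet.m_body, pend, r->m_nBytesIn);
  r->m_nBytesInSent = r->m_nBytesIn;

  return RTMP_SendPacket(r, &packet, FALSE);
}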


Within ReadN(), the function that receives data from the socket is RTMPSockBuf_Fill(). Let's look at its code (yet another layer of calls).
//Calls the socket recv() function to receive data
int
RTMPSockBuf_Fill(RTMPSockBuf *sb)
{
  int nBytes;


  if (!sb->sb_size)
    sb->sb_start = sb->sb_buf;


  while (1)
    {
  //free space = total size - unprocessed bytes - already-processed bytes
  //|----processed----|----unprocessed----|---------free space----------|
  //sb_buf            sb_start            sb_start + sb_size
      nBytes = sizeof(sb->sb_buf) - sb->sb_size - (sb->sb_start - sb->sb_buf);
#if defined(CRYPTO) && !defined(NO_SSL)
      if (sb->sb_ssl)
  {
    nBytes = TLS_read((SSL *)sb->sb_ssl, sb->sb_start + sb->sb_size, nBytes);
  }
      else
#endif
  {
  //int recv(SOCKET s, char *buf, int len, int flags);
  //s:     descriptor identifying a connected socket
  //buf:   buffer that receives the data
  //len:   length of the buffer
  //flags: call options
  //the buffer is free from sb_start (next byte to process) + sb_size (bytes not yet processed) onwards, so new data is stored there
    nBytes = recv(sb->sb_socket, sb->sb_start + sb->sb_size, nBytes, 0);
  }
      if (nBytes != -1)
  {
  //the unprocessed region has grown by nBytes
    sb->sb_size += nBytes;
  }
      else
  {
    int sockerr = GetSockError();
    RTMP_Log(RTMP_LOGDEBUG, "%s, recv returned %d. GetSockError(): %d (%s)",
        __FUNCTION__, nBytes, sockerr, strerror(sockerr));
    if (sockerr == EINTR && !RTMP_ctrlC)
      continue;


    if (sockerr == EWOULDBLOCK || sockerr == EAGAIN)
      {
        sb->sb_timedout = TRUE;
        nBytes = 0;
      }
  }
      break;
    }


  return nBytes;
}


As the RTMPSockBuf_Fill() code shows, the system socket recv() function is what ultimately receives the data sent over the RTMP connection.
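The interplay between RTMPSockBuf_Fill() and ReadN() is a simple sliding window over sb_buf: Fill() appends new bytes after the unprocessed region, and ReadN() consumes from the front of that region. A toy illustration of the bookkeeping (this is not libRTMP code, just the same arithmetic with made-up names):
#include <string.h>

typedef struct {        /* simplified stand-in for RTMPSockBuf */
  char  buf[16384];
  char *start;          /* first unprocessed byte      */
  int   size;           /* number of unprocessed bytes */
} DemoBuf;

/* free space = total - already consumed (start - buf) - still unprocessed (size) */
static int demo_free_space(const DemoBuf *b)
{
  return (int)sizeof(b->buf) - (int)(b->start - b->buf) - b->size;
}

/* consume up to 'want' bytes, exactly as ReadN() does with memcpy + pointer bump */
static int demo_consume(DemoBuf *b, char *out, int want)
{
  int n = want < b->size ? want : b->size;
  memcpy(out, b->start, n);
  b->start += n;        /* advance past the consumed bytes */
  b->size  -= n;        /* fewer unprocessed bytes remain  */
  return n;
}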


I have written quite a series of posts on this by now; most of the material comes from the experience I accumulated while working on RTMP last year.
Looking back over it, the main functionality of RTMPdump (libRTMP) has largely been analyzed already,
but some gaps remain. The main one is how the various messages (Messages) are handled, which I had not studied thoroughly enough, so that is what needs a closer look here.


Let's take another look at RTMP_ClientPacket(), the "soul" of RTMPdump (libRTMP); it is where the various messages get handled.
//Process received data
int
RTMP_ClientPacket(RTMP *r, RTMPPacket *packet)
{
  int bHasMediaPacket = 0;
  switch (packet->m_packetType)
    {
  //RTMP message type ID = 1: Set Chunk Size
    case 0x01:
      /* chunk size */
    //----------------
    r->dlg->AppendCInfo("Processing received data: Set Chunk Size message (typeID=1).");
    //-----------------------------
    RTMP_LogPrintf("Processing Set Chunk Size message (typeID=1)\n");
      HandleChangeChunkSize(r, packet);
      break;
  //RTMP message type ID = 3: Acknowledgement
    case 0x03:
      /* bytes read report */
      RTMP_Log(RTMP_LOGDEBUG, "%s, received: bytes read report", __FUNCTION__);
      break;
  //RTMP message type ID = 4: User Control
    case 0x04:
      /* ctrl */
    //----------------
    r->dlg->AppendCInfo("Processing received data: User Control message (typeID=4).");
    //-----------------------------
    RTMP_LogPrintf("Processing User Control message (typeID=4)\n");
      HandleCtrl(r, packet);
      break;
  //RTMP message type ID = 5: Window Acknowledgement Size
    case 0x05:
      /* server bw */
    //----------------
    r->dlg->AppendCInfo("Processing received data: Window Acknowledgement Size message (typeID=5).");
    //-----------------------------
    RTMP_LogPrintf("Processing Window Acknowledgement Size message (typeID=5)\n");
      HandleServerBW(r, packet);
      break;
  //RTMP message type ID = 6: Set Peer Bandwidth
    case 0x06:
      /* client bw */
    //----------------
    r->dlg->AppendCInfo("Processing received data: Set Peer Bandwidth message (typeID=6).");
    //-----------------------------
    RTMP_LogPrintf("Processing Set Peer Bandwidth message (typeID=6)\n");
      HandleClientBW(r, packet);
      break;
  //RTMP message type ID = 8: audio data
    case 0x08:
      /* audio data */
      /*RTMP_Log(RTMP_LOGDEBUG, "%s, received: audio %lu bytes", __FUNCTION__, packet.m_nBodySize); */
      HandleAudio(r, packet);
      bHasMediaPacket = 1;
      if (!r->m_mediaChannel)
  r->m_mediaChannel = packet->m_nChannel;
      if (!r->m_pausing)
  r->m_mediaStamp = packet->m_nTimeStamp;
      break;
  //RTMP message type ID = 9: video data
    case 0x09:
      /* video data */
      /*RTMP_Log(RTMP_LOGDEBUG, "%s, received: video %lu bytes", __FUNCTION__, packet.m_nBodySize); */
      HandleVideo(r, packet);
      bHasMediaPacket = 1;
      if (!r->m_mediaChannel)
  r->m_mediaChannel = packet->m_nChannel;
      if (!r->m_pausing)
  r->m_mediaStamp = packet->m_nTimeStamp;
      break;
  //RTMP message type ID = 15 (0x0F): AMF3 data message, ignored
    case 0x0F:      /* flex stream send */
      RTMP_Log(RTMP_LOGDEBUG,
    "%s, flex stream send, size %lu bytes, not supported, ignoring",
    __FUNCTION__, packet->m_nBodySize);
      break;
  //RTMP message type ID = 16 (0x10): AMF3 shared object, ignored
    case 0x10:      /* flex shared object */
      RTMP_Log(RTMP_LOGDEBUG,
    "%s, flex shared object, size %lu bytes, not supported, ignoring",
    __FUNCTION__, packet->m_nBodySize);
      break;
  //RTMP message type ID = 17 (0x11): AMF3-encoded command message (the leading byte is skipped and the rest handed to HandleInvoke)
    case 0x11:      /* flex message */
      {
  RTMP_Log(RTMP_LOGDEBUG,
      "%s, flex message, size %lu bytes, not fully supported",
      __FUNCTION__, packet->m_nBodySize);
  /*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */


  /* some DEBUG code */
#if 0
     RTMP_LIB_AMFObject obj;
     int nRes = obj.Decode(packet.m_body+1, packet.m_nBodySize-1);
     if(nRes < 0) {
     RTMP_Log(RTMP_LOGERROR, "%s, error decoding AMF3 packet", __FUNCTION__);
     /*return; */
     }


     obj.Dump();
#endif


  if (HandleInvoke(r, packet->m_body + 1, packet->m_nBodySize - 1) == 1)
    bHasMediaPacket = 2;
  break;
      }
  //RTMP message type ID = 18 (0x12): AMF0-encoded data message
    case 0x12:
      /* metadata (notify) */


      RTMP_Log(RTMP_LOGDEBUG, "%s, received: notify %lu bytes", __FUNCTION__,
    packet->m_nBodySize);
    //metadata handling, commented out for now
    /*
      if (HandleMetadata(r, packet->m_body, packet->m_nBodySize))
  bHasMediaPacket = 1;
      break;
    */
  //RTMP message type ID = 19 (0x13): AMF0 shared object, ignored
    case 0x13:
      RTMP_Log(RTMP_LOGDEBUG, "%s, shared object, not supported, ignoring",
    __FUNCTION__);
      break;
  //RTMP message type ID = 20 (0x14): AMF0-encoded command message
  //this is where command messages are handled!
    case 0x14:
    //----------------
    r->dlg->AppendCInfo("Processing received data: command message (AMF0-encoded) (typeID=20).");
    //-----------------------------
      /* invoke */
      RTMP_Log(RTMP_LOGDEBUG, "%s, received: invoke %lu bytes", __FUNCTION__,
    packet->m_nBodySize);
    RTMP_LogPrintf("处理命令消息 (typeID=20,AMF0编码)\n");
      /*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */


      if (HandleInvoke(r, packet->m_body, packet->m_nBodySize) == 1)
  bHasMediaPacket = 2;
      break;
  //RTMP message type ID = 22 (0x16): aggregate message (FLV tags)
    case 0x16:
      {
  /* go through FLV packets and handle metadata packets */
  unsigned int pos = 0;
  uint32_t nTimeStamp = packet->m_nTimeStamp;


  while (pos + 11 < packet->m_nBodySize)
    {
      uint32_t dataSize = AMF_DecodeInt24(packet->m_body + pos + 1);  /* size without header (11) and prevTagSize (4) */


      if (pos + 11 + dataSize + 4 > packet->m_nBodySize)
        {
    RTMP_Log(RTMP_LOGWARNING, "Stream corrupt?!");
    break;
        }
      if (packet->m_body[pos] == 0x12)
        {
    HandleMetadata(r, packet->m_body + pos + 11, dataSize);
        }
      else if (packet->m_body[pos] == 8 || packet->m_body[pos] == 9)
        {
    nTimeStamp = AMF_DecodeInt24(packet->m_body + pos + 4);
    nTimeStamp |= (packet->m_body[pos + 7] << 24);
        }
      pos += (11 + dataSize + 4);
    }
  if (!r->m_pausing)
    r->m_mediaStamp = nTimeStamp;


  /* FLV tag(s) */
  /*RTMP_Log(RTMP_LOGDEBUG, "%s, received: FLV tag(s) %lu bytes", __FUNCTION__, packet.m_nBodySize); */
  bHasMediaPacket = 1;
  break;
      }
    default:
      RTMP_Log(RTMP_LOGDEBUG, "%s, unknown packet type received: 0x%02x", __FUNCTION__,
    packet->m_packetType);
#ifdef _DEBUG
      RTMP_LogHex(RTMP_LOGDEBUG, (const uint8_t *)packet->m_body, packet->m_nBodySize);
#endif
    }


  return bHasMediaPacket;
}


As analyzed earlier, when the message type ID is 0x14 (20), i.e. an AMF0-encoded command message, HandleInvoke() is called to process it.
See: RTMPdump(libRTMP) 源代码分析 7: 建立一个流媒体连接 (NetStream部分 2)


That message type will not be analyzed again here. Instead, let's look at the other message types, since a great many messages have to be processed between initiating an RTMP connection and receiving audio/video data.


See: RTMP流媒体播放过程


Below, in order of increasing message type ID, we look at how each kind of received message is handled.


A message with type ID 0x01 sets the chunk size; its handler is HandleChangeChunkSize(), and as you can see the function is very simple.


static void
HandleChangeChunkSize(RTMP *r, const RTMPPacket *packet)
{
  if (packet->m_nBodySize >= 4)
    {
      r->m_inChunkSize = AMF_DecodeInt32(packet->m_body);
      RTMP_Log(RTMP_LOGDEBUG, "%s, received: chunk size change to %d", __FUNCTION__,
    r->m_inChunkSize);
    }
}
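HandleChangeChunkSize() only covers the receiving direction. The body of a Set Chunk Size message is nothing but a 32-bit big-endian value, so a client that wants to enlarge its own outgoing chunks can build one with the same packet API. A sketch (SendSetChunkSize is a hypothetical helper name, modeled on the send helpers in rtmp.c):
static int
SendSetChunkSize(RTMP *r, int chunkSize)   /* hypothetical helper, for illustration */
{
  RTMPPacket packet;
  char pbuf[RTMP_MAX_HEADER_SIZE + 4], *pend = pbuf + sizeof(pbuf);

  packet.m_nChannel = 0x02;                  /* control channel */
  packet.m_headerType = RTMP_PACKET_SIZE_LARGE;
  packet.m_packetType = 0x01;                /* Set Chunk Size */
  packet.m_nTimeStamp = 0;
  packet.m_nInfoField2 = 0;
  packet.m_hasAbsTimestamp = 0;
  packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE;
  packet.m_nBodySize = 4;

  /* 4-byte big-endian chunk size */
  AMF_EncodeInt32(packet.m_body, pend, chunkSize);
  r->m_outChunkSize = chunkSize;             /* remember it for our own chunking */

  return RTMP_SendPacket(r, &packet, FALSE);
}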


A message with type ID 0x03 is an Acknowledgement ("bytes read report"); it has no dedicated handler function and is only logged.
A message with type ID 0x04 is a User Control message; its handler is HandleCtrl(). Messages of this kind show up very frequently; the function body is shown below. The meaning of each user control event is not discussed here; the relevant documentation covers it.
Note: there is a long English comment in the middle of this function that is well worth reading.
//Handle User Control messages. User Control messages are sent by the server.
static void
HandleCtrl(RTMP *r, const RTMPPacket *packet)
{
  short nType = -1;
  unsigned int tmp;
  if (packet->m_body && packet->m_nBodySize >= 2)
  //event type (2 bytes)
    nType = AMF_DecodeInt16(packet->m_body);
  RTMP_Log(RTMP_LOGDEBUG, "%s, received ctrl. type: %d, len: %d", __FUNCTION__, nType,
      packet->m_nBodySize);
  /*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */


  if (packet->m_nBodySize >= 6)
    {
  //each event type is handled differently
      switch (nType)
  {
  //Stream Begin
  case 0:
  //stream ID
    tmp = AMF_DecodeInt32(packet->m_body + 2);
    RTMP_Log(RTMP_LOGDEBUG, "%s, Stream Begin %d", __FUNCTION__, tmp);
    break;
  //Stream EOF
  case 1:
  //stream ID
    tmp = AMF_DecodeInt32(packet->m_body + 2);
    RTMP_Log(RTMP_LOGDEBUG, "%s, Stream EOF %d", __FUNCTION__, tmp);
    if (r->m_pausing == 1)
      r->m_pausing = 2;
    break;
  //Stream Dry
  case 2:
  //stream ID
    tmp = AMF_DecodeInt32(packet->m_body + 2);
    RTMP_Log(RTMP_LOGDEBUG, "%s, Stream Dry %d", __FUNCTION__, tmp);
    break;
  //the stream is a recorded stream
  case 4:
    tmp = AMF_DecodeInt32(packet->m_body + 2);
    RTMP_Log(RTMP_LOGDEBUG, "%s, Stream IsRecorded %d", __FUNCTION__, tmp);
    break;
  //server pings the client
  case 6:   /* server ping. reply with pong. */
    tmp = AMF_DecodeInt32(packet->m_body + 2);
    RTMP_Log(RTMP_LOGDEBUG, "%s, Ping %d", __FUNCTION__, tmp);
    RTMP_SendCtrl(r, 0x07, tmp, 0);
    break;


  /* FMS 3.5 servers send the following two controls to let the client
   * know when the server has sent a complete buffer. I.e., when the
   * server has sent an amount of data equal to m_nBufferMS in duration.
   * The server meters its output so that data arrives at the client
   * in realtime and no faster.
   *
   * The rtmpdump program tries to set m_nBufferMS as large as
   * possible, to force the server to send data as fast as possible.
   * In practice, the server appears to cap this at about 1 hour's
   * worth of data. After the server has sent a complete buffer, and
   * sends this BufferEmpty message, it will wait until the play
   * duration of that buffer has passed before sending a new buffer.
   * The BufferReady message will be sent when the new buffer starts.
   * (There is no BufferReady message for the very first buffer;
   * presumably the Stream Begin message is sufficient for that
   * purpose.)
   *
   * If the network speed is much faster than the data bitrate, then
   * there may be long delays between the end of one buffer and the
   * start of the next.
   *
   * Since usually the network allows data to be sent at
   * faster than realtime, and rtmpdump wants to download the data
   * as fast as possible, we use this RTMP_LF_BUFX hack: when we
   * get the BufferEmpty message, we send a Pause followed by an
   * Unpause. This causes the server to send the next buffer immediately
   * instead of waiting for the full duration to elapse. (That's
   * also the purpose of the ToggleStream function, which rtmpdump
   * calls if we get a read timeout.)
   *
   * Media player apps don't need this hack since they are just
   * going to play the data in realtime anyway. It also doesn't work
   * for live streams since they obviously can only be sent in
   * realtime. And it's all moot if the network speed is actually
   * slower than the media bitrate.
   */
  case 31:
    tmp = AMF_DecodeInt32(packet->m_body + 2);
    RTMP_Log(RTMP_LOGDEBUG, "%s, Stream BufferEmpty %d", __FUNCTION__, tmp);
    if (!(r->Link.lFlags & RTMP_LF_BUFX))
      break;
    if (!r->m_pausing)
      {
        r->m_pauseStamp = r->m_channelTimestamp[r->m_mediaChannel];
        RTMP_SendPause(r, TRUE, r->m_pauseStamp);
        r->m_pausing = 1;
      }
    else if (r->m_pausing == 2)
      {
        RTMP_SendPause(r, FALSE, r->m_pauseStamp);
        r->m_pausing = 3;
      }
    break;


  case 32:
    tmp = AMF_DecodeInt32(packet->m_body + 2);
    RTMP_Log(RTMP_LOGDEBUG, "%s, Stream BufferReady %d", __FUNCTION__, tmp);
    break;


  default:
    tmp = AMF_DecodeInt32(packet->m_body + 2);
    RTMP_Log(RTMP_LOGDEBUG, "%s, Stream xx %d", __FUNCTION__, tmp);
    break;
  }


    }


  if (nType == 0x1A)
    {
      RTMP_Log(RTMP_LOGDEBUG, "%s, SWFVerification ping received: ", __FUNCTION__);
      if (packet->m_nBodySize > 2 && packet->m_body[2] > 0x01)
  {
    RTMP_Log(RTMP_LOGERROR,
            "%s: SWFVerification Type %d request not supported! Patches welcome...",
      __FUNCTION__, packet->m_body[2]);
  }
#ifdef CRYPTO
      /*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */


      /* respond with HMAC SHA256 of decompressed SWF, key is the 30byte player key, also the last 30 bytes of the server handshake are applied */
      else if (r->Link.SWFSize)
  {
    RTMP_SendCtrl(r, 0x1B, 0, 0);
  }
      else
  {
    RTMP_Log(RTMP_LOGERROR,
        "%s: Ignoring SWFVerification request, use --swfVfy!",
        __FUNCTION__);
  }
#else
      RTMP_Log(RTMP_LOGERROR,
    "%s: Ignoring SWFVerification request, no CRYPTO support!",
    __FUNCTION__);
#endif
    }
}
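The ping/pong exchange in case 6 deserves a second look: the server's Ping carries a 4-byte value, and the client answers through RTMP_SendCtrl(r, 0x07, tmp, 0). Assuming the usual encoding in rtmp.c, the resulting user control body is just a 16-bit event type followed by the echoed 32-bit value; a small illustrative sketch:
/* illustration only: the 6-byte body behind RTMP_SendCtrl(r, 0x07, value, 0) */
static void
build_pong_body(char body[6], unsigned int value)
{
  char *pend = body + 6;
  char *p = body;
  p = AMF_EncodeInt16(p, pend, 0x07);   /* event type: PingResponse (pong)     */
  AMF_EncodeInt32(p, pend, value);      /* event data: echo the server's value */
}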


A message with type ID 0x05 is a Window Acknowledgement Size message; its handler is HandleServerBW(). Note that the official Adobe documentation calls this message "Window Acknowledgement Size", but before Adobe published the specification, the groups that reverse-engineered RTMP had always called it "ServerBW". It is only a name, so it hardly matters. The handling code is very simple:


static void
HandleServerBW(RTMP *r, const RTMPPacket *packet)
{
  r->m_nServerBW = AMF_DecodeInt32(packet->m_body);
  RTMP_Log(RTMP_LOGDEBUG, "%s: server BW = %d", __FUNCTION__, r->m_nServerBW);
}


A message with type ID 0x06 is a Set Peer Bandwidth message; its handler is HandleClientBW(). As with the previous message, the official Adobe documentation calls it "Set Peer Bandwidth", whereas the reverse-engineering groups had always called it "ClientBW" before the specification was published. The handler is not complicated either:
static void
HandleClientBW(RTMP *r, const RTMPPacket *packet)
{
  r->m_nClientBW = AMF_DecodeInt32(packet->m_body);
  if (packet->m_nBodySize > 4)
    r->m_nClientBW2 = packet->m_body[4];
  else
    r->m_nClientBW2 = -1;
  RTMP_Log(RTMP_LOGDEBUG, "%s: client BW = %d %d", __FUNCTION__, r->m_nClientBW,
      r->m_nClientBW2);
}
Messages with type ID 0x08 carry audio data and are not analyzed further here.
Messages with type ID 0x09 carry video data and are not analyzed further here.
Messages with type IDs 0x0F-0x11 carry AMF3-encoded payloads (data, shared object, command).
Messages with type IDs 0x12-0x14 carry AMF0-encoded payloads (data, shared object, command).
Note: the message with type ID 0x14 is the important one; it carries AMF0-encoded commands and has already been analyzed.
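To wrap up, here is a quick reference for the type IDs touched on above. The symbolic names are just shorthand for this article (the code uses the raw hex values; newer versions of rtmp.h define similar RTMP_PACKET_TYPE_* macros):
enum {
  MSG_SET_CHUNK_SIZE   = 0x01,  /* Set Chunk Size                */
  MSG_ACKNOWLEDGEMENT  = 0x03,  /* Acknowledgement (bytes read)  */
  MSG_USER_CONTROL     = 0x04,  /* User Control events           */
  MSG_WINDOW_ACK_SIZE  = 0x05,  /* Window Acknowledgement Size   */
  MSG_SET_PEER_BW      = 0x06,  /* Set Peer Bandwidth            */
  MSG_AUDIO            = 0x08,  /* audio data                    */
  MSG_VIDEO            = 0x09,  /* video data                    */
  MSG_AMF3_DATA        = 0x0F,  /* AMF3 data message             */
  MSG_AMF3_SHARED_OBJ  = 0x10,  /* AMF3 shared object            */
  MSG_AMF3_COMMAND     = 0x11,  /* AMF3 command message          */
  MSG_AMF0_DATA        = 0x12,  /* AMF0 data message (metadata)  */
  MSG_AMF0_SHARED_OBJ  = 0x13,  /* AMF0 shared object            */
  MSG_AMF0_COMMAND     = 0x14,  /* AMF0 command message (invoke) */
  MSG_AGGREGATE        = 0x16   /* aggregate message (FLV tags)  */
};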


Original article:
http://blog.csdn.net/leixiaohua1020/article/details/12952977



