Category: LINUX

2011-11-26 11:56:21

------------------------------------------
This article is original to this blog. Reposting is welcome!
When reposting, please credit the source: amingriyue.blog.chinaunix.net
------------------------------------------

In Android, pmem is a region carved out of main memory and reserved for user-space processes that need physically contiguous buffers.
We start by analyzing the pmem driver.
pmem is registered via the platform bus.
1. The device part:
struct platform_device mxc_android_pmem_device = {
    .name = "android_pmem",
    .id = 0,
};
Its platform data:
static struct android_pmem_platform_data android_pmem_pdata = {
    .name = "pmem_adsp",
    .start = 0,
    .size = SZ_32M,
    .no_allocator = 0,
    .cached = PMEM_NONCACHE_NORMAL,
};
android_pmem_pdata.start is recalculated in fixup_mxc_board from the boot memory tag:
        size = t->u.mem.size;

        android_pmem_pdata.start =
                PHYS_OFFSET + size - android_pmem_pdata.size;
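In the board file this device and its pdata are tied together and registered on the platform bus. A minimal sketch of the generic way to do it (the Freescale BSP may use its own registration helper; the helper name below and the PHYS_OFFSET and memory-tag values in the comment are illustrative assumptions):

/* Sketch: attach the pdata and register the device during board init.
 * With an assumed PHYS_OFFSET of 0x90000000 and a 512 MB (0x20000000)
 * memory tag, the fixup above gives
 *   start = 0x90000000 + 0x20000000 - SZ_32M = 0xAE000000,
 * i.e. the last 32 MB of RAM is set aside for pmem. */
static void __init board_pmem_init(void)
{
    mxc_android_pmem_device.dev.platform_data = &android_pmem_pdata;
    platform_device_register(&mxc_android_pmem_device);
}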

2. The driver part:
Defined in drivers/misc/pmem.c:
static struct platform_driver pmem_driver = {
    .probe = pmem_probe,
    .remove = pmem_remove,
    .driver = { .name = "android_pmem" }
};
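The driver is registered with the platform bus at module init time; a minimal sketch of what pmem.c does (the exact init/exit function names in the file may differ):

static int __init pmem_init(void)
{
    return platform_driver_register(&pmem_driver);
}

static void __exit pmem_exit(void)
{
    platform_driver_unregister(&pmem_driver);
}

module_init(pmem_init);
module_exit(pmem_exit);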
Once the device and driver are matched, pmem_probe runs:
static int pmem_probe(struct platform_device *pdev)
{
    struct android_pmem_platform_data *pdata;

    if (!pdev || !pdev->dev.platform_data) {
        printk(KERN_ALERT "Unable to probe pmem!\n");
        return -1;
    }
    pdata = pdev->dev.platform_data;
    return pmem_setup(pdata, NULL, NULL);//2-1
}
2-1:pmem_setup(pdata, NULL, NULL):
int pmem_setup(struct android_pmem_platform_data *pdata,
           long (*ioctl)(struct file *, unsigned int, unsigned long),
           int (*release)(struct inode *, struct file *))
{
    int err = 0;
    int i, index = 0;
    int id = id_count;
    id_count++;

    pmem[id].no_allocator = pdata->no_allocator; // 0 here
    pmem[id].cached = pdata->cached; // PMEM_NONCACHE_NORMAL
    pmem[id].buffered = pdata->buffered;
    pmem[id].base = pdata->start; // physical start of the region
    pmem[id].size = pdata->size; // size of the region
    pmem[id].ioctl = ioctl;
    pmem[id].release = release;
    init_rwsem(&pmem[id].bitmap_sem);
    init_MUTEX(&pmem[id].data_list_sem);
    INIT_LIST_HEAD(&pmem[id].data_list);
    pmem[id].dev.name = pdata->name;
    pmem[id].dev.minor = id; // minor device number
    pmem[id].dev.fops = &pmem_fops; // fops
    printk(KERN_INFO "%s: %d init\n", pdata->name, pdata->cached);

    err = misc_register(&pmem[id].dev); // register the pmem misc device
    if (err) {
        printk(KERN_ALERT "Unable to register pmem driver!\n");
        goto err_cant_register_device;
    }
    pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC; // number of minimum-size pages

    pmem[id].bitmap = kmalloc(pmem[id].num_entries * // allocate that many pmem_bits entries
                  sizeof(struct pmem_bits), GFP_KERNEL);
    if (!pmem[id].bitmap)
        goto err_no_mem_for_metadata;

    memset(pmem[id].bitmap, 0, sizeof(struct pmem_bits) *
                      pmem[id].num_entries);

    for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) {
        if ((pmem[id].num_entries) &  1<<i) {
            PMEM_ORDER(id, index) = i;
            index = PMEM_NEXT_INDEX(id, index);
        }
    }

    if (pmem[id].cached)
        pmem[id].vbase = ioremap_cached(pmem[id].base,
                        pmem[id].size);
#ifdef ioremap_ext_buffered
    else if (pmem[id].buffered)
        pmem[id].vbase = ioremap_ext_buffered(pmem[id].base,
                              pmem[id].size);
#endif
    else
        pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size); // map the pmem physical base into kernel space

    if (pmem[id].vbase == 0)
        goto error_cant_remap;

    pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL));
    if (pmem[id].no_allocator)
        pmem[id].allocated = 0;

#if PMEM_DEBUG
    debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id,
                &debug_fops);
#endif
    return 0;
error_cant_remap:
    kfree(pmem[id].bitmap);
err_no_mem_for_metadata:
    misc_deregister(&pmem[id].dev);
err_cant_register_device:
    return -1;
}
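One detail worth unpacking is the bitmap loop above: it seeds the buddy allocator by writing the binary decomposition of num_entries into PMEM_ORDER at successive indices. A small worked example, assuming PMEM_MIN_ALLOC is one 4 KB page as in the stock pmem.c:

/* size = SZ_32M  =>  num_entries = 32 MB / 4 KB = 8192 = 2^13.
 * 8192 has a single bit set, so the loop records one free block of
 * order 13 covering the whole region; pmem_allocate() later splits
 * it into smaller power-of-two buddies on demand. */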
So pmem_setup initializes the pmem region and registers it as a misc device. Every later access to pmem from the upper layers is an operation on this misc device, which makes its fops essential:
struct file_operations pmem_fops = {
    .release = pmem_release,
    .mmap = pmem_mmap,
    .open = pmem_open,
    .unlocked_ioctl = pmem_ioctl,
};
Let's look at pmem_open first:
static int pmem_open(struct inode *inode, struct file *file)
{
    struct pmem_data *data; // a new pmem_data is created on every open
    int id = get_id(file);
    int ret = 0;

    DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file));
    /* setup file->private_data to indicate its unmapped */
    /*  you can only open a pmem device one time */
    if (file->private_data != NULL)
        return -1;
    data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL);
    if (!data) {
        printk("pmem: unable to allocate memory for pmem metadata.");
        return -1;
    }
    data->flags = 0;
    data->index = -1;
    data->task = NULL;
    data->vma = NULL;
    data->pid = 0;
    data->master_file = NULL;
#if PMEM_DEBUG
    data->ref = 0;
#endif
    INIT_LIST_HEAD(&data->region_list);
    init_rwsem(&data->sem);

    file->private_data = data; // stash it in the file
    INIT_LIST_HEAD(&data->list);

    down(&pmem[id].data_list_sem);
    list_add(&data->list, &pmem[id].data_list); // add it to the per-device data_list
    up(&pmem[id].data_list_sem);
    return ret;
}
So every time a pmem device is opened, a pmem_data is added to pmem[id].data_list and stored in file->private_data.
Next, let's look at pmem_mmap:
static int pmem_mmap(struct file *file, struct vm_area_struct *vma) // vma is the free user-space virtual area chosen by the mmap syscall, which will be mapped onto pmem
{
    struct pmem_data *data;
    int index;
    unsigned long vma_size =  vma->vm_end - vma->vm_start;
    int ret = 0, id = get_id(file);

    if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) {
#if PMEM_DEBUG
        printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned"
                " and a multiple of pages_size.\n");
#endif
        return -EINVAL;
    }

    data = (struct pmem_data *)file->private_data; // the pmem_data saved at open time
    down_write(&data->sem);
    /* check this file isn't already mmaped, for submaps check this file
     * has never been mmaped */
    if ((data->flags & PMEM_FLAGS_MASTERMAP) ||
        (data->flags & PMEM_FLAGS_SUBMAP) ||
        (data->flags & PMEM_FLAGS_UNSUBMAP)) {
#if PMEM_DEBUG
        printk(KERN_ERR "pmem: you can only mmap a pmem file once, "
               "this file is already mmaped. %x\n", data->flags);
#endif
        ret = -EINVAL;
        goto error;
    }
    /* if file->private_data == unalloced, alloc*/
    if (data && data->index == -1) {
        down_write(&pmem[id].bitmap_sem);
        index = pmem_allocate(id, vma->vm_end - vma->vm_start); // allocate a chunk from the pmem region
        up_write(&pmem[id].bitmap_sem);
        data->index = index;
    }
    /* either no space was available or an error occured */
    if (!has_allocation(file)) {
        ret = -EINVAL;
        printk("pmem: could not find allocation for map.\n");
        goto error;
    }

    if (pmem_len(id, data) < vma_size) {
#if PMEM_DEBUG
        printk(KERN_WARNING "pmem: mmap size [%lu] does not match"
               "size of backing region [%lu].\n", vma_size,
               pmem_len(id, data));
#endif
        ret = -EINVAL;
        goto error;
    }

    vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT; // physical start of the allocation
    vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_page_prot); // protection flags for the mapping

    if (data->flags & PMEM_FLAGS_CONNECTED) { // this flag means several processes share the same region
        struct pmem_region_node *region_node;
        struct list_head *elt;
        if (pmem_map_garbage(id, vma, data, 0, vma_size)) {
            printk("pmem: mmap failed in kernel!\n");
            ret = -EAGAIN;
            goto error;
        }
        list_for_each(elt, &data->region_list) { // walk region_list for each region_node
            region_node = list_entry(elt, struct pmem_region_node,
                         list);
            DLOG("remapping file: %p %lx %lx\n", file,
                region_node->region.offset,
                region_node->region.len);
            if (pmem_remap_pfn_range(id, vma, data,  // build page tables for the allocated pmem
                         region_node->region.offset,
                         region_node->region.len)) {
                ret = -EAGAIN;
                goto error;
            }
        }
        data->flags |= PMEM_FLAGS_SUBMAP; // shared with other processes
        get_task_struct(current->group_leader);
        data->task = current->group_leader;
        data->vma = vma;
#if PMEM_DEBUG
        data->pid = current->pid;
#endif
        DLOG("submmapped file %p vma %p pid %u\n", file, vma,
             current->pid);
    } else {
        if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) { // build page tables for the allocated pmem
            printk(KERN_INFO "pmem: mmap failed in kernel!\n");
            ret = -EAGAIN;
            goto error;
        }
        data->flags |= PMEM_FLAGS_MASTERMAP; // first mapping, exclusively owned for now
        data->pid = current->pid;
    }
    vma->vm_ops = &vm_ops;
error:
    up_write(&data->sem);
    return ret;
}
pmem_mmap maps the requested user-space virtual area onto the chunk allocated from pmem, building the page tables with pmem_map_pfn_range.
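Putting the pieces together, the whole user-space path is just open, mmap, then an ioctl to fetch the physical address. A minimal sketch, assuming the /dev/pmem_adsp node created for the pdata above, that the linux/android_pmem.h kernel header is visible to user space, and a made-up buffer size:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/android_pmem.h>

#define BUF_SIZE (1024 * 1024)  /* hypothetical 1 MB buffer */

int pmem_demo(void)
{
    int fd = open("/dev/pmem_adsp", O_RDWR);              /* pmem_open(): creates a pmem_data */
    if (fd < 0)
        return -1;

    /* pmem_mmap(): allocates BUF_SIZE of contiguous pmem and maps it */
    void *va = mmap(0, BUF_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (va == MAP_FAILED) {
        close(fd);
        return -1;
    }

    struct pmem_region region;
    if (ioctl(fd, PMEM_GET_PHYS, &region) == 0)            /* physical address and length */
        printf("pmem phys 0x%lx len %lu\n", region.offset, region.len);

    munmap(va, BUF_SIZE);
    close(fd);
    return 0;
}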
Now let's walk through pmem_ioctl:
static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
    struct pmem_data *data;
    int id = get_id(file);

    switch (cmd) {
    case PMEM_GET_PHYS: // get the physical parameters: physical address and data length
        {
            struct pmem_region region;
            DLOG("get_phys\n");
            if (!has_allocation(file)) {
                region.offset = 0;
                region.len = 0;
            } else {
                data = (struct pmem_data *)file->private_data;
                region.offset = pmem_start_addr(id, data);
                region.len = pmem_len(id, data);
            }
            printk(KERN_INFO "pmem: request for physical address of pmem region "
                    "from process %d.\n", current->pid);
            if (copy_to_user((void __user *)arg, &region,
                        sizeof(struct pmem_region)))
                return -EFAULT;
            break;
        }
    case PMEM_MAP: // map a region
        {
            struct pmem_region region;
            if (copy_from_user(&region, (void __user *)arg,
                        sizeof(struct pmem_region)))
                return -EFAULT;
            data = (struct pmem_data *)file->private_data;
            return pmem_remap(&region, file, PMEM_MAP);
        }
        break;
    case PMEM_UNMAP: // unmap a region
        {
            struct pmem_region region;
            if (copy_from_user(&region, (void __user *)arg,
                        sizeof(struct pmem_region)))
                return -EFAULT;
            data = (struct pmem_data *)file->private_data;
            return pmem_remap(&region, file, PMEM_UNMAP);
            break;
        }
    case PMEM_GET_SIZE: // get the size of this file's allocation
        {
            struct pmem_region region;
            DLOG("get_size\n");
            pmem_get_size(&region, file);
            if (copy_to_user((void __user *)arg, &region,
                        sizeof(struct pmem_region)))
                return -EFAULT;
            break;
        }
    case PMEM_GET_TOTAL_SIZE: // get the total pmem size
        {
            struct pmem_region region;
            DLOG("get total size\n");
            region.offset = 0;
            get_id(file);
            region.len = pmem[id].size;
            if (copy_to_user((void __user *)arg, &region,
                        sizeof(struct pmem_region)))
                return -EFAULT;
            break;
        }
    case PMEM_ALLOCATE: // allocate a chunk of pmem
        {
            if (has_allocation(file))
                return -EINVAL;
            data = (struct pmem_data *)file->private_data;
            data->index = pmem_allocate(id, arg);
            break;
        }
    case PMEM_CONNECT: // share pmem between processes
        DLOG("connect\n");
        return pmem_connect(arg, file);
        break;
    case PMEM_CACHE_FLUSH:
        {
            struct pmem_region region;
            DLOG("flush\n");
            if (copy_from_user(&region, (void __user *)arg,
                       sizeof(struct pmem_region)))
                return -EFAULT;
            flush_pmem_file(file, region.offset, region.len);
            break;
        }
    default:
        if (pmem[id].ioctl)
            return pmem[id].ioctl(file, cmd, arg);
        return -EINVAL;
    }
    return 0;
}
For the ioctl interface, the main thing is to understand what each command means.
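The least obvious command is PMEM_CONNECT, which is what makes the PMEM_FLAGS_CONNECTED branch in pmem_mmap fire. A rough two-process sketch of the sharing flow, with error handling omitted and the buffer size and function names purely illustrative:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/android_pmem.h>

#define BUF_SIZE (1024 * 1024)  /* hypothetical buffer size */

/* Process A (the master): allocate and map a pmem buffer, then pass the
 * returned fd to process B over some IPC channel (binder, socket, ...). */
int share_master(void **va)
{
    int fd = open("/dev/pmem_adsp", O_RDWR);
    *va = mmap(0, BUF_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    return fd;
}

/* Process B: connect a fresh fd to A's buffer, then map it. The connect
 * sets PMEM_FLAGS_CONNECTED, so pmem_mmap takes the submap path above. */
void *share_client(int master_fd)
{
    int fd = open("/dev/pmem_adsp", O_RDWR);
    ioctl(fd, PMEM_CONNECT, master_fd);
    return mmap(0, BUF_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}

Note that the connected mmap initially points every page at a placeholder ("garbage") page; the client then issues PMEM_MAP with a pmem_region to remap the offsets it actually needs, which is exactly the region_list walk seen in pmem_mmap above.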
Now let's look at a concrete user, hardware/mx5x/libcamera/Camera_pmem.cpp:
PmemAllocator::PmemAllocator(int bufCount, int bufSize): // constructor: callers pass in bufCount and bufSize, whose names pretty much explain themselves
    err_ret(0), mFD(0),mTotalSize(0),mBufCount(bufCount),mBufSize(bufSize), // initialize members
    mVirBase(NULL),mPhyBase(NULL)
{
    LOG_FUNCTION_NAME;
    memset(mSlotAllocated, 0, sizeof(bool)*MAX_SLOT); // clear all slot flags; pmem is split into at most MAX_SLOT slots, a scheme I find not very rigorous and prone to overflow

    int err;
    struct pmem_region region;
    mFD = open(PMEM_DEV, O_RDWR); // open the pmem device, i.e. the misc device registered by the driver above
    if (mFD < 0) {
         LOGE("Error!PmemAllocator constructor");
         err_ret = -1;
         return;
    }

    err = ioctl(mFD, PMEM_GET_TOTAL_SIZE, &region); // get the total pmem size
    if (err == 0)
    {
         LOGE("Info!get pmem total size %d",(int)region.len);
    }
    else
    {
        LOGE("Error!Cannot get total length in PmemAllocator constructor");
        err_ret = -1;
        return;
    }
   
    mBufSize = (bufSize + DEFAULT_PMEM_ALIGN-1) & ~(DEFAULT_PMEM_ALIGN-1); // round up to page alignment

    mTotalSize = mBufSize*bufCount; // total size to request
    if((mTotalSize > region.len)||(mBufCount > MAX_SLOT)) { // check whether pmem can satisfy the request
        LOGE("Error!Out of PmemAllocator capability");
    }
    else
    {
        uint8_t *virtualbase = (uint8_t*)mmap(0, mTotalSize, // map the requested size; per the pmem analysis above, this maps user space onto the pmem region
            PROT_READ|PROT_WRITE, MAP_SHARED, mFD, 0);

        if (virtualbase == MAP_FAILED) {
           LOGE("Error!mmap(fd=%d, size=%u) failed (%s)",
                   mFD, (unsigned int)mTotalSize, strerror(errno));
           return;
        }

        memset(&region, 0, sizeof(region));

        if (ioctl(mFD, PMEM_GET_PHYS, &region) == -1) // get the physical parameters of the mapping: physical address and length
        {
          LOGE("Error!Failed to get physical address of source!\n");
          munmap(virtualbase, mTotalSize);
          return;
        }
        mVirBase = (void *)virtualbase; // store in member variables
        mPhyBase = region.offset; // the physical address just returned by PMEM_GET_PHYS
        LOGV("Allocator total size %d, vir addr 0x%x, phy addr 0x%x",mTotalSize,mVirBase,mPhyBase);
    }
}

PmemAllocator::~PmemAllocator() // destructor
{
    LOG_FUNCTION_NAME;

    for(int index=0;index < MAX_SLOT;index ++) {
        if(mSlotAllocated[index]) {
            LOGE("Error!Cannot deinit PmemAllocator before all memory back to allocator");
        }
    }

    if(mVirBase) {
        munmap(mVirBase, mTotalSize);
    }
    if(mFD) {
        close(mFD);
    }
    
}

int PmemAllocator::allocate(struct picbuffer *pbuf, int size) // carve one bufSize-sized block out of the pmem reserved in the constructor
{

    LOG_FUNCTION_NAME;

    if((!mVirBase)||(!pbuf)||(size>mBufSize)) { // normally size equals mBufSize
        LOGE("Error!No memory for allocator");
        return -1;
    }

    for(int index=0;index < MAX_SLOT;index ++) {
        if(!mSlotAllocated[index]) { // find a slot that is not in use yet
            LOGE("Free slot %d for allocating mBufSize %d request size %d",
                             index,mBufSize,size);

            pbuf->virt_start= (unsigned char *)mVirBase+index*mBufSize;
            pbuf->phy_offset= mPhyBase+index*mBufSize;
            pbuf->length= mBufSize;
            mSlotAllocated[index] = true; // mark the slot as in use
            return 0;
        }
    }
    return -1;
}

int PmemAllocator::deAllocate(struct picbuffer *pbuf) // return a buffer handed out by allocate()
{
    LOG_FUNCTION_NAME;
    if((!mVirBase)||(!pbuf)) {
        LOGE("Error!No memory for allocator");
        return -1;
    }
    int nSlot = ((unsigned int)pbuf->virt_start- (unsigned int)mVirBase)/mBufSize;
    if((nSlot >= 0) && (nSlot < MAX_SLOT)) {
        LOGE("Info!deAllocate for slot %d",nSlot);
        mSlotAllocated[nSlot] = false;
        return 0;
    }
    else{
        LOGE("Error!Not a valid buffer");
        return -1;
    }
}

Example use in the UVC camera path:
#ifdef UVC_CAMERA
    {
        mPmemAllocator = new PmemAllocator(1, target_size); // instantiate PmemAllocator; its constructor reserves 1*target_size bytes of pmem
        if(mPmemAllocator->allocate(&(mIPUprocessbuf[0]),target_size) < 0){ // carve one target_size block out of the reserved pmem
            LOGE("allocate the %d buffer for ipu process error", i);
            goto  exit1;
        }
        IPUConverter(mCaptureBuffers[cfilledbuffer.index].phy_offset,mIPUprocessbuf[0].phy_offset,
            mPictureWidth,mPictureHeight,mPictureWidth,mPictureHeight,V4L2_PIX_FMT_YUYV,V4L2_PIX_FMT_YUV420);
    }
        buf1 = mIPUprocessbuf[0].virt_start;
#endif
#ifdef UVC_CAMERA
         if (mPmemAllocator != NULL){
            mPmemAllocator->deAllocate(&mIPUprocessbuf[0]); // release the target_size block just allocated
            mPmemAllocator = NULL;
            }
         if (mCameraOpened != 0)
             cameraClose();
#endif
That pretty much wraps up the pmem analysis. Overall, the pmem architecture is fairly simple. Using it relies on an allocator class built in the upper layers, and as in the example above the main steps are:
1. Reserve a chunk of memory from the pmem region.
2. When a buffer is needed, carve one block out of that chunk and record its virtual address, physical address, and buffer size.
3. Release the block allocated in step 2.
4. When finished, the allocator's destructor releases the whole chunk reserved in step 1.