編輯:關於Android編程
在android中,pmem是特地從內存中劃出來的一塊,給android的用戶空間需要物理上連續的進程使用。
我們首先分析pmem驅動。
pmem使用的是platform bus注冊的。
1,device部分:
/* Platform device describing the Android pmem region; the platform bus
 * matches it by name against the "android_pmem" platform driver. */
struct platform_device mxc_android_pmem_device = {
	.id   = 0,
	.name = "android_pmem",
};
其data部分:
/* Board-level configuration for the pmem region.  .start is a
 * placeholder here; it is recomputed during the board fixup. */
static struct android_pmem_platform_data android_pmem_pdata = {
	.name         = "pmem_adsp",
	.size         = SZ_32M,
	.start        = 0,	/* patched at boot time */
	.cached       = PMEM_NONCACHE_NORMAL,
	.no_allocator = 0,	/* use the bitmap allocator */
};
android_pmem_pdata.start在fixup_mxc_board中重新計算了:
size = t->u.mem.size;
android_pmem_pdata.start =
PHYS_OFFSET + size - android_pmem_pdata.size;
2,driver部分:
在drivers/misc/pmem.c定義:
/* Platform driver; binding it to a device named "android_pmem"
 * triggers pmem_probe(). */
static struct platform_driver pmem_driver = {
	.driver = { .name = "android_pmem" },
	.probe  = pmem_probe,
	.remove = pmem_remove,
};
device和driver匹配後將執行pmem_probe:
static int pmem_probe(struct platform_device *pdev)
{
struct android_pmem_platform_data *pdata;
if (!pdev || !pdev->dev.platform_data) {
printk(KERN_ALERT "Unable to probe pmem!\n");
return -1;
}
pdata = pdev->dev.platform_data;
return pmem_setup(pdata, NULL, NULL);//2-1
}
2-1:pmem_setup(pdata, NULL, NULL):
/*
 * pmem_setup - initialise one pmem region and register it as a misc device.
 * @pdata:   board-supplied region description (name, start, size, flags)
 * @ioctl:   optional driver-specific ioctl hook (NULL for this board)
 * @release: optional driver-specific release hook (NULL for this board)
 *
 * Copies the board data into the global pmem[] slot picked by id_count,
 * registers the region as a misc character device, allocates the
 * per-unit allocation bitmap, and ioremaps the physical region into
 * kernel space.  Returns 0 on success, -1 on any failure (error paths
 * unwind in reverse order of setup).
 */
int pmem_setup(struct android_pmem_platform_data *pdata,
long (*ioctl)(struct file *, unsigned int, unsigned long),
int (*release)(struct inode *, struct file *))
{
int err = 0;
int i, index = 0;
int id = id_count; /* each region takes the next global slot */
id_count++;
pmem[id].no_allocator = pdata->no_allocator; /* 0 for this board */
pmem[id].cached = pdata->cached; /* PMEM_NONCACHE_NORMAL here */
pmem[id].buffered = pdata->buffered;
pmem[id].base = pdata->start; /* physical start of the region */
pmem[id].size = pdata->size; /* region size in bytes */
pmem[id].ioctl = ioctl;
pmem[id].release = release;
init_rwsem(&pmem[id].bitmap_sem);
init_MUTEX(&pmem[id].data_list_sem);
INIT_LIST_HEAD(&pmem[id].data_list);
pmem[id].dev.name = pdata->name;
pmem[id].dev.minor = id; /* minor number doubles as the region id */
pmem[id].dev.fops = &pmem_fops; /* file operations for the misc node */
printk(KERN_INFO "%s: %d init\n", pdata->name, pdata->cached);
err = misc_register(&pmem[id].dev); /* register the pmem misc device */
if (err) {
printk(KERN_ALERT "Unable to register pmem driver!\n");
goto err_cant_register_device;
}
pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC; /* number of allocation units */
pmem[id].bitmap = kmalloc(pmem[id].num_entries * /* one pmem_bits per unit */
sizeof(struct pmem_bits), GFP_KERNEL);
if (!pmem[id].bitmap)
goto err_no_mem_for_metadata;
memset(pmem[id].bitmap, 0, sizeof(struct pmem_bits) *
pmem[id].num_entries);
/* Walk the bits of num_entries from high to low, recording the order
 * of each power-of-two chunk at its starting index -- this appears to
 * seed the buddy-style allocator's free map (PMEM_ORDER /
 * PMEM_NEXT_INDEX are defined elsewhere). */
for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) {
if ((pmem[id].num_entries) & 1<<i) {
PMEM_ORDER(id, index) = i;
index = PMEM_NEXT_INDEX(id, index);
}
}
if (pmem[id].cached)
pmem[id].vbase = ioremap_cached(pmem[id].base,
pmem[id].size);
#ifdef ioremap_ext_buffered
else if (pmem[id].buffered)
pmem[id].vbase = ioremap_ext_buffered(pmem[id].base,
pmem[id].size);
#endif
else
pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size); /* map the region into kernel space */
if (pmem[id].vbase == 0)
goto error_cant_remap;
pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL));
if (pmem[id].no_allocator)
pmem[id].allocated = 0;
#if PMEM_DEBUG
debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id,
&debug_fops);
#endif
return 0;
error_cant_remap:
kfree(pmem[id].bitmap);
err_no_mem_for_metadata:
misc_deregister(&pmem[id].dev);
err_cant_register_device:
return -1;
}
我們發現pmem_setup將pmem初始化後注冊為misc設備。我們後面上層對pmem訪問就是對該misc設備的操作,所以fops非常重要:
/* File operations backing the pmem misc device node; these are the
 * entry points user space reaches through open/mmap/ioctl/close. */
struct file_operations pmem_fops = {
	.open           = pmem_open,
	.mmap           = pmem_mmap,
	.unlocked_ioctl = pmem_ioctl,
	.release        = pmem_release,
};
我們首先看下pmem_open:
static int pmem_open(struct inode *inode, struct file *file)
{
struct pmem_data *data;//pmem_data結構,每打開一次新建一個pmem_data
int id = get_id(file);
int ret = 0;
DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file));
/* setup file->private_data to indicate its unmapped */
/* you can only open a pmem device one time */
if (file->private_data != NULL)
return -1;
data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL);
if (!data) {
printk("pmem: unable to allocate memory for pmem metadata.");
return -1;
}
data->flags = 0;
data->index = -1;
data->task = NULL;
data->vma = NULL;
data->pid = 0;
data->master_file = NULL;
#if PMEM_DEBUG
data->ref = 0;
#endif
INIT_LIST_HEAD(&data->region_list);
init_rwsem(&data->sem);
file->private_data = data;//賦值
INIT_LIST_HEAD(&data->list);
down(&pmem[id].data_list_sem);
list_add(&data->list, &pmem[id].data_list);//放到data_list
up(&pmem[id].data_list_sem);
return ret;
}
我們看到每打開一個pmem,將有一個pmem_data添加到pmem[id].data_list。並將該pmem_data賦值給file->private_data。
我們再看下pmem_mmap:
/*
 * pmem_mmap - mmap() handler: back the caller's vma with pmem memory.
 * @file: pmem file; its pmem_data (saved at open) holds allocation state
 * @vma:  the free user address range the kernel chose for this mapping
 *
 * Allocates a block from the region on first map, then builds page
 * tables mapping the vma onto that block.  A file may only be mmapped
 * once.  Returns 0 on success or a negative errno.
 */
static int pmem_mmap(struct file *file, struct vm_area_struct *vma) /* vma: user-space window that will map the pmem block */
{
struct pmem_data *data;
int index;
unsigned long vma_size = vma->vm_end - vma->vm_start;
int ret = 0, id = get_id(file);
if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) {
#if PMEM_DEBUG
printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned"
" and a multiple of pages_size.\n");
#endif
return -EINVAL;
}
data = (struct pmem_data *)file->private_data; /* pmem_data saved at open time */
down_write(&data->sem);
/* check this file isn't already mmaped, for submaps check this file
* has never been mmaped */
if ((data->flags & PMEM_FLAGS_MASTERMAP) ||
(data->flags & PMEM_FLAGS_SUBMAP) ||
(data->flags & PMEM_FLAGS_UNSUBMAP)) {
#if PMEM_DEBUG
printk(KERN_ERR "pmem: you can only mmap a pmem file once, "
"this file is already mmaped. %x\n", data->flags);
#endif
ret = -EINVAL;
goto error;
}
/* if file->private_data == unalloced, alloc*/
if (data && data->index == -1) {
down_write(&pmem[id].bitmap_sem);
index = pmem_allocate(id, vma->vm_end - vma->vm_start); /* carve a block out of the pmem region */
up_write(&pmem[id].bitmap_sem);
data->index = index;
}
/* either no space was available or an error occured */
if (!has_allocation(file)) {
ret = -EINVAL;
printk("pmem: could not find allocation for map.\n");
goto error;
}
if (pmem_len(id, data) < vma_size) {
#if PMEM_DEBUG
printk(KERN_WARNING "pmem: mmap size [%lu] does not match"
"size of backing region [%lu].\n", vma_size,
pmem_len(id, data));
#endif
ret = -EINVAL;
goto error;
}
vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT; /* physical page frame of the pmem block */
vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_page_prot); /* protection/caching flags for the mapping */
if (data->flags & PMEM_FLAGS_CONNECTED) { /* CONNECTED: this file shares another file's region */
struct pmem_region_node *region_node;
struct list_head *elt;
if (pmem_map_garbage(id, vma, data, 0, vma_size)) {
printk("pmem: mmap failed in kernel!\n");
ret = -EAGAIN;
goto error;
}
list_for_each(elt, &data->region_list) { /* walk the sub-regions recorded on region_list */
region_node = list_entry(elt, struct pmem_region_node,
list);
DLOG("remapping file: %p %lx %lx\n", file,
region_node->region.offset,
region_node->region.len);
if (pmem_remap_pfn_range(id, vma, data, /* build page tables for this sub-region */
region_node->region.offset,
region_node->region.len)) {
ret = -EAGAIN;
goto error;
}
}
data->flags |= PMEM_FLAGS_SUBMAP; /* sub-mapping shared with other processes */
get_task_struct(current->group_leader);
data->task = current->group_leader;
data->vma = vma;
#if PMEM_DEBUG
data->pid = current->pid;
#endif
DLOG("submmapped file %p vma %p pid %u\n", file, vma,
current->pid);
} else {
if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) { /* build page tables for the whole allocation */
printk(KERN_INFO "pmem: mmap failed in kernel!\n");
ret = -EAGAIN;
goto error;
}
data->flags |= PMEM_FLAGS_MASTERMAP; /* first mapping: this file is the master */
data->pid = current->pid;
}
vma->vm_ops = &vm_ops;
error:
up_write(&data->sem);
return ret;
}
pmem_mmap主要將申請的虛擬用戶空間映射到pmem分配的指定空間,利用pmem_map_pfn_range創建頁表。
下面看下pmem_ioctl:
/*
 * pmem_ioctl - ioctl dispatcher for the pmem misc device.
 * @file: pmem file whose pmem_data carries the allocation state
 * @cmd:  one of the PMEM_* commands
 * @arg:  user pointer to a struct pmem_region (most commands) or a size
 *
 * Unknown commands are forwarded to the region's optional ioctl hook.
 * Returns 0 on success or a negative errno.
 *
 * Fixes vs. the transcribed original: every "®ion" (HTML-entity
 * mangling of "&region") is restored to "&region"; the dead bare
 * "get_id(file);" call in PMEM_GET_TOTAL_SIZE is dropped (id is
 * computed once above); unreachable break-after-return removed.
 */
static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pmem_data *data;
	int id = get_id(file);

	switch (cmd) {
	case PMEM_GET_PHYS:	/* physical address and length of this file's allocation */
	{
		struct pmem_region region;
		DLOG("get_phys\n");
		if (!has_allocation(file)) {
			region.offset = 0;
			region.len = 0;
		} else {
			data = (struct pmem_data *)file->private_data;
			region.offset = pmem_start_addr(id, data);
			region.len = pmem_len(id, data);
		}
		printk(KERN_INFO "pmem: request for physical address of pmem region "
			"from process %d.\n", current->pid);
		if (copy_to_user((void __user *)arg, &region,
				 sizeof(struct pmem_region)))
			return -EFAULT;
		break;
	}
	case PMEM_MAP:	/* map a sub-region into the caller */
	{
		struct pmem_region region;
		if (copy_from_user(&region, (void __user *)arg,
				   sizeof(struct pmem_region)))
			return -EFAULT;
		data = (struct pmem_data *)file->private_data;
		return pmem_remap(&region, file, PMEM_MAP);
	}
	case PMEM_UNMAP:	/* undo a previous PMEM_MAP */
	{
		struct pmem_region region;
		if (copy_from_user(&region, (void __user *)arg,
				   sizeof(struct pmem_region)))
			return -EFAULT;
		data = (struct pmem_data *)file->private_data;
		return pmem_remap(&region, file, PMEM_UNMAP);
	}
	case PMEM_GET_SIZE:	/* size of this file's allocation */
	{
		struct pmem_region region;
		DLOG("get_size\n");
		pmem_get_size(&region, file);
		if (copy_to_user((void __user *)arg, &region,
				 sizeof(struct pmem_region)))
			return -EFAULT;
		break;
	}
	case PMEM_GET_TOTAL_SIZE:	/* total size of the whole pmem region */
	{
		struct pmem_region region;
		DLOG("get total size\n");
		region.offset = 0;
		region.len = pmem[id].size;
		if (copy_to_user((void __user *)arg, &region,
				 sizeof(struct pmem_region)))
			return -EFAULT;
		break;
	}
	case PMEM_ALLOCATE:	/* allocate 'arg' bytes from the region */
	{
		if (has_allocation(file))
			return -EINVAL;
		data = (struct pmem_data *)file->private_data;
		data->index = pmem_allocate(id, arg);
		break;
	}
	case PMEM_CONNECT:	/* share another pmem file's allocation */
		DLOG("connect\n");
		return pmem_connect(arg, file);
	case PMEM_CACHE_FLUSH:	/* flush caches for a sub-region */
	{
		struct pmem_region region;
		DLOG("flush\n");
		if (copy_from_user(&region, (void __user *)arg,
				   sizeof(struct pmem_region)))
			return -EFAULT;
		flush_pmem_file(file, region.offset, region.len);
		break;
	}
	default:
		if (pmem[id].ioctl)
			return pmem[id].ioctl(file, cmd, arg);
		return -EINVAL;
	}
	return 0;
}
ioctl主要是要搞清楚各個命令的含義。
下面我們看下具體應用,hardware/mx5x/libcamera/Camera_pmem.cpp:
/*
 * PmemAllocator constructor.
 *
 * Opens the pmem misc device registered by the kernel driver, queries
 * the total region size (PMEM_GET_TOTAL_SIZE), then mmap()s bufCount
 * buffers of bufSize bytes (rounded up to DEFAULT_PMEM_ALIGN) and
 * records the virtual base (mVirBase) and physical base (mPhyBase,
 * from PMEM_GET_PHYS).  err_ret is 0 on success and -1 on failure.
 *
 * Fixes vs. the transcribed original: the mangled signature
 * "PmemAllocator:memAllocator(...)" and every "®ion" (HTML-entity
 * damage of "&region") are restored, and the capacity/mmap/GET_PHYS
 * failure paths now set err_ret so callers can detect them.
 */
PmemAllocator::PmemAllocator(int bufCount, int bufSize):
	err_ret(0), mFD(0), mTotalSize(0), mBufCount(bufCount), mBufSize(bufSize),
	mVirBase(NULL), mPhyBase(NULL)
{
	LOG_FUNCTION_NAME;
	/* all slots start free; the pool is statically split into at most
	 * MAX_SLOT fixed-size slots */
	memset(mSlotAllocated, 0, sizeof(bool)*MAX_SLOT);
	int err;
	struct pmem_region region;

	mFD = open(PMEM_DEV, O_RDWR);	/* the misc device registered by pmem_setup() */
	if (mFD < 0) {
		LOGE("Error!PmemAllocator constructor");
		err_ret = -1;
		return;
	}
	err = ioctl(mFD, PMEM_GET_TOTAL_SIZE, &region);	/* total pmem region size */
	if (err == 0)
	{
		LOGE("Info!get pmem total size %d",(int)region.len);
	}
	else
	{
		LOGE("Error!Cannot get total length in PmemAllocator constructor");
		err_ret = -1;
		return;
	}
	mBufSize = (bufSize + DEFAULT_PMEM_ALIGN-1) & ~(DEFAULT_PMEM_ALIGN-1);	/* round up to alignment */
	mTotalSize = mBufSize*bufCount;	/* bytes to reserve */
	if((mTotalSize > region.len)||(mBufCount > MAX_SLOT)) {	/* does the region fit the request? */
		LOGE("Error!Out of PmemAllocator capability");
		err_ret = -1;	/* was silently left 0 in the original */
	}
	else
	{
		/* map mTotalSize bytes of the pmem region into this process */
		uint8_t *virtualbase = (uint8_t*)mmap(0, mTotalSize,
			PROT_READ|PROT_WRITE, MAP_SHARED, mFD, 0);
		if (virtualbase == MAP_FAILED) {
			LOGE("Error!mmap(fd=%d, size=%u) failed (%s)",
				mFD, (unsigned int)mTotalSize, strerror(errno));
			err_ret = -1;	/* was silently left 0 in the original */
			return;
		}
		memset(&region, 0, sizeof(region));
		if (ioctl(mFD, PMEM_GET_PHYS, &region) == -1)	/* physical address of the mapping */
		{
			LOGE("Error!Failed to get physical address of source!\n");
			munmap(virtualbase, mTotalSize);
			err_ret = -1;	/* was silently left 0 in the original */
			return;
		}
		mVirBase = (void *)virtualbase;	/* virtual base of the pool */
		mPhyBase = region.offset;	/* physical base reported by the driver */
		LOGV("Allocator total size %d, vir addr 0x%x, phy addr 0x%x",mTotalSize,mVirBase,mPhyBase);
	}
}
/*
 * Destructor: warn about any slot still outstanding, then unmap the
 * pool and close the device.
 *
 * Fix: the original guarded close() with "if (mFD)", but when open()
 * fails the constructor leaves mFD == -1, so close(-1) was called.
 * Only close non-negative descriptors.
 */
PmemAllocator::~PmemAllocator()
{
	LOG_FUNCTION_NAME;
	for (int index = 0; index < MAX_SLOT; index++) {
		if (mSlotAllocated[index]) {
			LOGE("Error!Cannot deinit PmemAllocator before all memory back to allocator");
		}
	}
	if (mVirBase) {
		munmap(mVirBase, mTotalSize);
	}
	if (mFD >= 0) {	/* -1 means open() failed; nothing to close */
		close(mFD);
	}
}
int PmemAllocator::allocate(struct picbuffer *pbuf, int size)//這個函數的功能是從構造函數中申請的pmem中分配一塊bufSize大小的內存
{
LOG_FUNCTION_NAME;
if((!mVirBase)||(!pbuf)||(size>mBufSize)) {//一般size等於mBufSize
LOGE("Error!No memory for allocator");
return -1;
}
for(int index=0;index < MAX_SLOT;index ++) {
if(!mSlotAllocated[index]) {//找到還沒被使用的一塊
LOGE("Free slot %d for allocating mBufSize %d request size %d",
index,mBufSize,size);
pbuf->virt_start= (unsigned char *)mVirBase+index*mBufSize;
pbuf->phy_offset= mPhyBase+index*mBufSize;
pbuf->length= mBufSize;
mSlotAllocated[index] = true;//置上被使用的標記
return 0;
}
}
return -1;
}
/*
 * Return a slot previously handed out by allocate().  The slot index is
 * recovered from the buffer's virtual address relative to the pool
 * base.  Returns 0 on success, -1 for an invalid or unallocated buffer.
 *
 * Fix: the original computed the slot via (unsigned int) casts of both
 * pointers, which truncates addresses on 64-bit builds; plain byte-
 * pointer subtraction is used instead, and buffers below the pool base
 * (previously a huge bogus unsigned slot) are now rejected explicitly.
 */
int PmemAllocator::deAllocate(struct picbuffer *pbuf)
{
	LOG_FUNCTION_NAME;
	if ((!mVirBase) || (!pbuf)) {
		LOGE("Error!No memory for allocator");
		return -1;
	}
	/* byte offset of the buffer within the pool; negative == not ours */
	long offset = (unsigned char *)pbuf->virt_start - (unsigned char *)mVirBase;
	if (offset >= 0 && mBufSize > 0) {
		int nSlot = (int)(offset / mBufSize);
		if (nSlot < MAX_SLOT && mSlotAllocated[nSlot]) {
			LOGE("Info!deAllocate for slot %d", nSlot);
			mSlotAllocated[nSlot] = false;
			return 0;
		}
	}
	LOGE("Error!Not a valid buffer");
	return -1;
}
uvc camera中使用實例:
/* Usage example from the UVC camera HAL (fragment -- the enclosing
 * function and the declarations of i, buf1, target_size etc. are not
 * visible here). */
#ifdef UVC_CAMERA
{
mPmemAllocator = new PmemAllocator(1, target_size); /* reserve 1*target_size bytes of pmem via the constructor */
if(mPmemAllocator->allocate(&(mIPUprocessbuf[0]),target_size) < 0){ /* carve out one target_size buffer */
LOGE("allocate the %d buffer for ipu process error", i); /* NOTE(review): 'i' is not defined in this fragment -- verify against the full source */
goto exit1;
}
IPUConverter(mCaptureBuffers[cfilledbuffer.index].phy_offset,mIPUprocessbuf[0].phy_offset,
mPictureWidth,mPictureHeight,mPictureWidth,mPictureHeight,V4L2_PIX_FMT_YUYV,V4L2_PIX_FMT_YUV420);
}
buf1 = mIPUprocessbuf[0].virt_start;
#endif
#ifdef UVC_CAMERA
if (mPmemAllocator != NULL){
mPmemAllocator->deAllocate(&mIPUprocessbuf[0]); /* release the buffer obtained above */
/* NOTE(review): the pointer is cleared without 'delete mPmemAllocator',
 * so the allocator object and its mmap are leaked -- confirm intent. */
mPmemAllocator = NULL;
}
if (mCameraOpened != 0)
cameraClose();
#endif
好了,pmem分析基本差不多了。整體分析下來,pmem架構還是比較簡單的。它的使用方法依賴上層構建的pmem分配的類,如例子中的話,使用步驟主要有如下步驟:
1,從pmem區申請一片內存
2,使用的時候從申請的一片內存中分配一塊,指定其虛擬地址,物理地址和buffersize
3,釋放第2步申請的pmem
4,使用結束時,將調用pmem類的析構函數,釋放第一步申請的那一片pmem
http://www.qrsdev.com/forum.php?mod=viewthread&tid=485&extra=page%3D1
下面代碼涉及了異步任務、JSON解析、自定義組件、IO流、文件下載、適配器原理等知識點。代碼實現從服務器上讀取JSON字符串。JSON字符串格式如下(此處原文被截斷):{object:{
首先推薦一下鴻洋大大的打造個性的圖片預覽與多點觸控視頻教程,這套教程教我們一步一步實現了多點觸控實現對圖片的平移和縮放的功能,這篇文章我將在鴻洋大大的基礎之上做了一些擴展
自定義PullToRefreshListView繼承ListView,在ListView頭部添加一個下拉的頭部布局,跟ListView用法完全一致。該自定義ListView
一、前言好久沒有更新過博客了,趁今天有空分享一個導航欄的自定義控件。有關此控件的demo相信在網上已經爛大街了,一搜一大把。我現在只著重分享一些我認為比較難理解的知識點。