Category: LINUX
2008-09-25 17:04:41
void *kmalloc(size_t size, int priority);
#define kfree(n) kfree_s((n), 0)
void kfree_s(void *ptr, int size);
int request_irq(unsigned int irq, void (*handler)(int), unsigned long type, char *name);
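As a rough sketch only (following the old-style prototypes listed above), the fragment below shows how kmalloc, request_irq and kfree might be combined in a driver's setup path; the buffer size, IRQ number, SA_INTERRUPT flag and the name "exampledev" are assumptions made for this example, not values from the text.

/*
 * Minimal setup sketch using the calls above. The IRQ number, flag and
 * buffer size are illustrative assumptions.
 */
static char *example_buf;

static void example_irq_handler(int irq)
{
    /* acknowledge and handle the interrupt here */
}

int exampledev_setup(void)
{
    example_buf = kmalloc(1024, GFP_KERNEL);    /* second argument is the priority/flags */
    if (!example_buf)
        return -ENOMEM;

    if (request_irq(5, example_irq_handler, SA_INTERRUPT, "exampledev")) {
        kfree(example_buf);                     /* expands to kfree_s(example_buf, 0) */
        return -EBUSY;
    }
    return 0;
}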
void exampledev_init(void)
{
    if (register_chrdev(MAJOR_NUM, "exampledev", &exampledev_fops))
        TRACE_TXT("Device exampledev driver registered error");
    else
        TRACE_TXT("Device exampledev driver registered successfully");
    …    // device initialization
}
struct file_operations {
    int (*lseek)();
    int (*read)();
    int (*write)();
    int (*readdir)();
    int (*select)();
    int (*ioctl)();
    int (*mmap)();
    int (*open)();
    void (*release)();
    int (*fsync)();
    int (*fasync)();
    int (*check_media_change)();
    void (*revalidate)();
};
int (*open)(struct inode *inode, struct file *filp);
void (*release)(struct inode *inode, struct file *filp);
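A possible implementation behind these two prototypes for a 2.4-era character driver is sketched below; the open counter and the MOD_INC/DEC_USE_COUNT calls are illustrative assumptions, not part of the listing above.

static int exampledev_count;                    /* hypothetical count of current users */

int exampledev_open(struct inode *inode, struct file *filp)
{
    exampledev_count++;
    MOD_INC_USE_COUNT;                          /* 2.4-style module use count */
    return 0;
}

void exampledev_release(struct inode *inode, struct file *filp)
{
    exampledev_count--;
    MOD_DEC_USE_COUNT;
}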
ssize_t (*read)(struct file *filp, char *buf, size_t count, loff_t *offp);
unsigned long copy_to_user(void *to, const void *from, unsigned long n);
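The read prototype and copy_to_user are typically combined as in the following sketch; exampledev_buf and EXAMPLEDEV_SIZE are hypothetical names for the driver's internal buffer and its size.

ssize_t exampledev_read(struct file *filp, char *buf, size_t count, loff_t *offp)
{
    if (*offp >= EXAMPLEDEV_SIZE)
        return 0;                               /* end of device */
    if (count > EXAMPLEDEV_SIZE - *offp)
        count = EXAMPLEDEV_SIZE - *offp;

    /* copy_to_user returns the number of bytes it could NOT copy */
    if (copy_to_user(buf, exampledev_buf + *offp, count))
        return -EFAULT;

    *offp += count;
    return count;
}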
ssize_t (*write)(struct file *, const char *, size_t, loff_t *);
unsigned long copy_from_user(void *to, const void *from, unsigned long n);
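The matching write path uses copy_from_user in the same way; the buffer name and size macro are again assumptions made for illustration.

ssize_t exampledev_write(struct file *filp, const char *buf, size_t count, loff_t *offp)
{
    if (*offp >= EXAMPLEDEV_SIZE)
        return -ENOSPC;                         /* no room left in the device buffer */
    if (count > EXAMPLEDEV_SIZE - *offp)
        count = EXAMPLEDEV_SIZE - *offp;

    if (copy_from_user(exampledev_buf + *offp, buf, count))
        return -EFAULT;

    *offp += count;
    return count;
}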
int (*ioctl)(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
struct file_operations exampledev_fops = {
    NULL, exampledev_read, exampledev_write, NULL, NULL, exampledev_ioctl,
    NULL, exampledev_open, exampledev_release, NULL, NULL, NULL, NULL
};
struct file_operations exampledev_fops = {
    read:    exampledev_read,
    write:   exampledev_write,
    ioctl:   exampledev_ioctl,
    open:    exampledev_open,
    release: exampledev_release,
};
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/devfs_fs_kernel.h>
#include <asm/hardware.h>                 /* board-specific GPIO macros: GPIO_B10, set_gpio_ctrl, ... (assumed header) */

#define DEVICE_NAME "leds"                /* name of the led device */
#define LED_MAJOR 231                     /* major number of the led device */

static unsigned long led_table[] = {      /* hardware resources used by the I/O-mapped led device */
    GPIO_B10, GPIO_B8, GPIO_B5, GPIO_B6,
};

/* control the leds through ioctl */
static int leds_ioctl(struct inode *inode, struct file *file,
                      unsigned int cmd, unsigned long arg)
{
    switch (cmd) {
    case 0:
    case 1:
        if (arg >= 4) {                   /* led_table has only 4 entries */
            return -EINVAL;
        }
        write_gpio_bit(led_table[arg], !cmd);
        return 0;
    default:
        return -EINVAL;
    }
}

static struct file_operations leds_fops = {
    owner: THIS_MODULE,
    ioctl: leds_ioctl,
};

static devfs_handle_t devfs_handle;

static int __init leds_init(void)
{
    int ret;
    int i;

    /* register the device with the kernel */
    ret = register_chrdev(LED_MAJOR, DEVICE_NAME, &leds_fops);
    if (ret < 0) {
        printk(DEVICE_NAME " can't register major number\n");
        return ret;
    }
    devfs_handle = devfs_register(NULL, DEVICE_NAME, DEVFS_FL_DEFAULT,
                                  LED_MAJOR, 0, S_IFCHR | S_IRUSR | S_IWUSR,
                                  &leds_fops, NULL);

    /* initialize the ports with macros; set_gpio_ctrl and write_gpio_bit are both macros */
    for (i = 0; i < 4; i++) {             /* one iteration per entry in led_table */
        set_gpio_ctrl(led_table[i] | GPIO_PULLUP_EN | GPIO_MODE_OUT);
        write_gpio_bit(led_table[i], 1);
    }

    printk(DEVICE_NAME " initialized\n");
    return 0;
}

static void __exit leds_exit(void)
{
    devfs_unregister(devfs_handle);
    unregister_chrdev(LED_MAJOR, DEVICE_NAME);
}

module_init(leds_init);
module_exit(leds_exit);
#arm-linux-gcc -D__KERNEL__ -I/arm/kernel/include -DKBUILD_BASENAME=leds -DMODULE -c -o leds.o leds.c
#insmod /lib/leds.o
#rmmod leds
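A hypothetical user-space test for the leds driver could look like the following sketch. It assumes a device node /dev/leds (created by devfs or with mknod /dev/leds c 231 0) and relies on the driver's convention that cmd selects on/off while arg selects the led; cmd 1 drives the pin low, which lights the led if the wiring is active-low.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
    int fd = open("/dev/leds", O_RDWR);   /* device node path is an assumption */
    if (fd < 0) {
        perror("open /dev/leds");
        return 1;
    }
    ioctl(fd, 1, 0);                      /* cmd 1, arg 0: switch led 0 on  */
    sleep(1);
    ioctl(fd, 0, 0);                      /* cmd 0, arg 0: switch led 0 off */
    close(fd);
    return 0;
}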
#include <linux/mtd/mtd.h>                /* struct mtd_info, MTD_ERASE, MTD_WRITE */
#include <linux/blk.h>                    /* 2.4 block-layer macros: INIT_REQUEST, CURRENT, end_request */

static void mtd_notify_add(struct mtd_info* mtd);
static void mtd_notify_remove(struct mtd_info* mtd);

static struct mtd_notifier notifier = {
    mtd_notify_add,
    mtd_notify_remove,
    NULL
};

static devfs_handle_t devfs_dir_handle = NULL;
static devfs_handle_t devfs_rw_handle[MAX_MTD_DEVICES];

static struct mtdblk_dev {
    struct mtd_info *mtd;                 /* Locked */
    int count;
    struct semaphore cache_sem;
    unsigned char *cache_data;
    unsigned long cache_offset;
    unsigned int cache_size;
    enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
} *mtdblks[MAX_MTD_DEVICES];

static spinlock_t mtdblks_lock;
/* this lock is used just in kernels >= 2.5.x */
static spinlock_t mtdblock_lock;

static int mtd_sizes[MAX_MTD_DEVICES];
static int mtd_blksizes[MAX_MTD_DEVICES];

static void erase_callback(struct erase_info *done)
{
    wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
    wake_up(wait_q);
}

static int erase_write(struct mtd_info *mtd, unsigned long pos, int len, const char *buf)
{
    struct erase_info erase;
    DECLARE_WAITQUEUE(wait, current);
    wait_queue_head_t wait_q;
    size_t retlen;
    int ret;

    /*
     * First, let's erase the flash block.
     */
    init_waitqueue_head(&wait_q);
    erase.mtd = mtd;
    erase.callback = erase_callback;
    erase.addr = pos;
    erase.len = len;
    erase.priv = (u_long)&wait_q;

    set_current_state(TASK_INTERRUPTIBLE);
    add_wait_queue(&wait_q, &wait);

    ret = MTD_ERASE(mtd, &erase);
    if (ret) {
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&wait_q, &wait);
        printk(KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] "
               "on \"%s\" failed\n", pos, len, mtd->name);
        return ret;
    }

    schedule();                           /* Wait for erase to finish. */
    remove_wait_queue(&wait_q, &wait);

    /*
     * Next, write the data to flash.
     */
    ret = MTD_WRITE(mtd, pos, len, &retlen, buf);
    if (ret)
        return ret;
    if (retlen != len)
        return -EIO;
    return 0;
}

static int write_cached_data(struct mtdblk_dev *mtdblk)
{
    struct mtd_info *mtd = mtdblk->mtd;
    int ret;

    if (mtdblk->cache_state != STATE_DIRTY)
        return 0;

    DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: writing cached data for \"%s\" "
          "at 0x%lx, size 0x%x\n", mtd->name,
          mtdblk->cache_offset, mtdblk->cache_size);

    ret = erase_write(mtd, mtdblk->cache_offset, mtdblk->cache_size, mtdblk->cache_data);
    if (ret)
        return ret;

    mtdblk->cache_state = STATE_EMPTY;
    return 0;
}

static int do_cached_write(struct mtdblk_dev *mtdblk, unsigned long pos, int len, const char *buf)
{
    …
}

static int do_cached_read(struct mtdblk_dev *mtdblk, unsigned long pos, int len, char *buf)
{
    …
}

static int mtdblock_open(struct inode *inode, struct file *file)
{
    …
}

static release_t mtdblock_release(struct inode *inode, struct file *file)
{
    int dev;
    struct mtdblk_dev *mtdblk;

    DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");

    if (inode == NULL)
        release_return(-ENODEV);

    dev = minor(inode->i_rdev);
    mtdblk = mtdblks[dev];

    down(&mtdblk->cache_sem);
    write_cached_data(mtdblk);
    up(&mtdblk->cache_sem);

    spin_lock(&mtdblks_lock);
    if (!--mtdblk->count) {
        /* It was the last usage. Free the device */
        mtdblks[dev] = NULL;
        spin_unlock(&mtdblks_lock);
        if (mtdblk->mtd->sync)
            mtdblk->mtd->sync(mtdblk->mtd);
        put_mtd_device(mtdblk->mtd);
        vfree(mtdblk->cache_data);
        kfree(mtdblk);
    } else {
        spin_unlock(&mtdblks_lock);
    }

    DEBUG(MTD_DEBUG_LEVEL1, "ok\n");

    BLK_DEC_USE_COUNT;
    release_return(0);
}

/*
 * This is a special request_fn because it is executed in a process context
 * to be able to sleep independently of the caller. The
 * io_request_lock (for <2.5) or queue_lock (for >=2.5) is held upon entry
 * and exit. The head of our request queue is considered active so there is
 * no need to dequeue requests before we are done.
 */
static void handle_mtdblock_request(void)
{
    struct request *req;
    struct mtdblk_dev *mtdblk;
    unsigned int res;

    for (;;) {
        INIT_REQUEST;
        req = CURRENT;
        spin_unlock_irq(QUEUE_LOCK(QUEUE));
        mtdblk = mtdblks[minor(req->rq_dev)];
        res = 0;

        if (minor(req->rq_dev) >= MAX_MTD_DEVICES)
            panic("%s : minor out of bound", __FUNCTION__);

        if (!IS_REQ_CMD(req))
            goto end_req;

        if ((req->sector + req->current_nr_sectors) > (mtdblk->mtd->size >> 9))
            goto end_req;

        // Handle the request
        switch (rq_data_dir(req)) {
            int err;

        case READ:
            down(&mtdblk->cache_sem);
            err = do_cached_read(mtdblk, req->sector << 9,
                                 req->current_nr_sectors << 9, req->buffer);
            up(&mtdblk->cache_sem);
            if (!err)
                res = 1;
            break;

        case WRITE:
            // Read only device
            if (!(mtdblk->mtd->flags & MTD_WRITEABLE))
                break;

            // Do the write
            down(&mtdblk->cache_sem);
            err = do_cached_write(mtdblk, req->sector << 9,
                                  req->current_nr_sectors << 9, req->buffer);
            up(&mtdblk->cache_sem);
            if (!err)
                res = 1;
            break;
        }

end_req:
        spin_lock_irq(QUEUE_LOCK(QUEUE));
        end_request(res);
    }
}

static volatile int leaving = 0;
static DECLARE_MUTEX_LOCKED(thread_sem);
static DECLARE_WAIT_QUEUE_HEAD(thr_wq);

int mtdblock_thread(void *dummy)
{
    …
}

#define RQFUNC_ARG request_queue_t *q

static void mtdblock_request(RQFUNC_ARG)
{
    /* Don't do anything, except wake the thread if necessary */
    wake_up(&thr_wq);
}

static int mtdblock_ioctl(struct inode *inode, struct file *file,
                          unsigned int cmd, unsigned long arg)
{
    struct mtdblk_dev *mtdblk;

    mtdblk = mtdblks[minor(inode->i_rdev)];

    switch (cmd) {
    case BLKGETSIZE:                      /* Return device size */
        return put_user((mtdblk->mtd->size >> 9), (unsigned long *)arg);

    case BLKFLSBUF:
        if (!capable(CAP_SYS_ADMIN))
            return -EACCES;
        fsync_dev(inode->i_rdev);
        invalidate_buffers(inode->i_rdev);
        down(&mtdblk->cache_sem);
        write_cached_data(mtdblk);
        up(&mtdblk->cache_sem);
        if (mtdblk->mtd->sync)
            mtdblk->mtd->sync(mtdblk->mtd);
        return 0;

    default:
        return -EINVAL;
    }
}

static struct block_device_operations mtd_fops = {
    owner:   THIS_MODULE,
    open:    mtdblock_open,
    release: mtdblock_release,
    ioctl:   mtdblock_ioctl
};

static void mtd_notify_add(struct mtd_info* mtd)
{
    …
}

static void mtd_notify_remove(struct mtd_info* mtd)
{
    if (!mtd || mtd->type == MTD_ABSENT)
        return;
    devfs_unregister(devfs_rw_handle[mtd->index]);
}

int __init init_mtdblock(void)
{
    int i;

    spin_lock_init(&mtdblks_lock);
    /* this lock is used just in kernels >= 2.5.x */
    spin_lock_init(&mtdblock_lock);

#ifdef CONFIG_DEVFS_FS
    if (devfs_register_blkdev(MTD_BLOCK_MAJOR, DEVICE_NAME, &mtd_fops)) {
        printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
               MTD_BLOCK_MAJOR);
        return -EAGAIN;
    }
    devfs_dir_handle = devfs_mk_dir(NULL, DEVICE_NAME, NULL);
    register_mtd_user(&notifier);
#else
    if (register_blkdev(MAJOR_NR, DEVICE_NAME, &mtd_fops)) {
        printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
               MTD_BLOCK_MAJOR);
        return -EAGAIN;
    }
#endif

    /* We fill it in at open() time. */
    for (i = 0; i < MAX_MTD_DEVICES; i++) {
        mtd_sizes[i] = 0;
        mtd_blksizes[i] = BLOCK_SIZE;
    }
    init_waitqueue_head(&thr_wq);

    /* Allow the block size to default to BLOCK_SIZE. */
    blksize_size[MAJOR_NR] = mtd_blksizes;
    blk_size[MAJOR_NR] = mtd_sizes;

    BLK_INIT_QUEUE(BLK_DEFAULT_QUEUE(MAJOR_NR), &mtdblock_request, &mtdblock_lock);
    kernel_thread(mtdblock_thread, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
    return 0;
}

static void __exit cleanup_mtdblock(void)
{
    leaving = 1;
    wake_up(&thr_wq);
    down(&thread_sem);

#ifdef CONFIG_DEVFS_FS
    unregister_mtd_user(&notifier);
    devfs_unregister(devfs_dir_handle);
    devfs_unregister_blkdev(MTD_BLOCK_MAJOR, DEVICE_NAME);
#else
    unregister_blkdev(MAJOR_NR, DEVICE_NAME);
#endif

    blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
    blksize_size[MAJOR_NR] = NULL;
    blk_size[MAJOR_NR] = NULL;
}

module_init(init_mtdblock);
module_exit(cleanup_mtdblock);
int register_blkdev(unsigned int major, const char *name, struct block_device_operations *bdops);
int unregister_blkdev(unsigned int major, const char *name);
BLK_INIT_QUEUE(BLK_DEFAULT_QUEUE(MAJOR_NR), &mtdblock_request, &mtdblock_lock);
blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
struct blk_dev_struct {
    /*
     * queue_proc has to be atomic
     */
    request_queue_t request_queue;
    queue_proc *queue;
    void *data;
};
struct request_queue {
    /*
     * the queue request freelist, one for reads and one for writes
     */
    struct request_list rq[2];

    /*
     * Together with queue_head for cacheline sharing
     */
    struct list_head queue_head;
    elevator_t elevator;

    request_fn_proc *request_fn;
    merge_request_fn *back_merge_fn;
    merge_request_fn *front_merge_fn;
    merge_requests_fn *merge_requests_fn;
    make_request_fn *make_request_fn;
    plug_device_fn *plug_device_fn;

    /*
     * The queue owner gets to use this for whatever they like.
     * ll_rw_blk doesn't touch it.
     */
    void *queuedata;

    /*
     * This is used to remove the plug when tq_disk runs.
     */
    struct tq_struct plug_tq;

    /*
     * Boolean that indicates whether this queue is plugged or not.
     */
    char plugged;

    /*
     * Boolean that indicates whether current_request is active or
     * not.
     */
    char head_active;

    /*
     * Is meant to protect the queue in the future instead of
     * io_request_lock
     */
    spinlock_t queue_lock;

    /*
     * Tasks wait here for free request
     */
    wait_queue_head_t wait_for_request;
};
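For comparison with handle_mtdblock_request above, a minimal 2.4-style request function that drains the default queue directly might look like the sketch below; it assumes MAJOR_NR and DEVICE_NAME were defined before including linux/blk.h, and dev_data is a hypothetical RAM buffer backing the device.

static void sample_request(request_queue_t *q)
{
    for (;;) {
        INIT_REQUEST;                 /* returns from the function when the queue is empty */
        if (rq_data_dir(CURRENT) == READ)
            memcpy(CURRENT->buffer,
                   dev_data + (CURRENT->sector << 9),
                   CURRENT->current_nr_sectors << 9);
        else
            memcpy(dev_data + (CURRENT->sector << 9),
                   CURRENT->buffer,
                   CURRENT->current_nr_sectors << 9);
        end_request(1);               /* report success and move on to the next request */
    }
}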