2 * drivers/agl/evk_lib.c
4 * Event library (kernel space part)
6 * @copyright Copyright (c) 2016-2019 TOYOTA MOTOR CORPORATION.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License published
10 * by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
22 #include <generated/autoconf.h>
23 #include <linux/module.h>
24 #include <linux/init.h>
25 #include <linux/version.h>
26 #include <linux/kernel.h>
27 #include <linux/slab.h>
28 #include <linux/sched.h>
29 #include <linux/wait.h>
30 #include <linux/poll.h>
31 #include <linux/list.h>
32 #include <asm/uaccess.h>
33 #include <linux/errno.h>
34 #include <linux/vmalloc.h>
36 #include <linux/proc_fs.h>
37 #include <linux/seq_file.h>
39 #include <linux/device.h>
40 #include <linux/cdev.h>
42 #ifndef STANDARD_INT_DEFINITIONS
43 #define STANDARD_INT_DEFINITIONS
52 #endif /* !STANDARD_INT_DEFINITIONS */
54 #define ENABLE_PROC_FS 1
56 #include <agldd/evk_lib.h>
58 /** @addtogroup EV_in */
/*
 * Kernel-version compatibility shims: Linux 2.4 does not provide
 * list_for_each_entry, so local equivalents are defined; on 2.6+
 * (detected by the macro already existing) only <linux/jiffies.h>
 * is pulled in (needed for time_before() used further below).
 */
60 /** In Linux2.4, list_for_each_entry is not provided, so it is prepared by self (in 2.6)
62 #ifdef list_for_each_entry
63 #define __LINUX_26_OR_HIGHER
66 #ifndef __LINUX_26_OR_HIGHER /* linux v2.4 */
/* Iterate over structures that embed a list_head named 'member'. */
68 #define list_for_each_entry(pos, head, member) \
69 for (pos = list_entry((head)->next, typeof(*pos), member), \
70 prefetch(pos->member.next); \
71 &pos->member != (head); \
72 pos = list_entry(pos->member.next, typeof(*pos), member), \
73 prefetch(pos->member.next))
/* Same iteration, but safe against removal of 'pos' inside the body. */
75 #define list_for_each_entry_safe(pos, n, head, member) \
76 for (pos = list_entry((head)->next, typeof(*pos), member), \
77 n = list_entry(pos->member.next, typeof(*pos), member); \
78 &pos->member != (head); \
79 pos = n, n = list_entry(n->member.next, typeof(*n), member))
81 #else /* linux v2.6 */
83 #include <linux/jiffies.h>
85 #endif /* linux v2.6 */
/*
 * Diagnostic macros.  EVK_assert/EVK_BUG log to the kernel ring buffer
 * at KERN_ALERT (the surrounding do/while or brace lines are truncated
 * in this copy of the file).  EVK_info0/EVK_info1 are info-level traces;
 * the second, empty definition of EVK_info1 compiles the trace out.
 */
87 #define EVK_assert(cond, mesg) \
89 printk(KERN_ALERT "[EVK]ASSERT(pid:%d): " #cond " at %s:%d; " \
90 #mesg "\n", current->pid, __FILE__, __LINE__); \
94 #define EVK_BUG(mesg) \
95 printk(KERN_ALERT "[EVK]BUG: " mesg); \
99 #define EVK_info0(s) printk(KERN_ALERT "[EVK]INFO: " s)
100 #define EVK_info1(s, t) printk(KERN_ALERT "[EVK]INFO: " s, t)
103 #define EVK_info1(s, t)
/* Character-device parameters and the single global semaphore (evk_mtx)
 * that serializes access to every flag/queue structure in this file. */
106 static int devmajor = EVK_DEV_MAJOR;
107 static int devminor = 0;
108 static int nrdevs = 1;
109 static struct cdev cdev;
110 static struct class *pClass;
112 DEFINE_SEMAPHORE(evk_mtx);
/* EVK_mutex_lock records which pid/source-line last took evk_mtx —
 * a deadlock-debugging aid (the down() call itself is on a line
 * truncated from this copy). */
114 static int down_line;
115 #define EVK_mutex_lock() { \
117 down_pid = current->pid; \
118 down_line = __LINE__; \
120 #define EVK_mutex_unlock() (up(&evk_mtx))
/* Message-buffer allocator: vmalloc by default, kmalloc when selected. */
122 #ifdef EVK_USE_KMALLOC
123 #define evk_malloc(s) kmalloc((s), GFP_KERNEL)
124 #define evk_free kfree
125 #else // use vmalloc (this is the default)
126 #define evk_malloc(s) vmalloc((s))
127 #define evk_free vfree
/* Copy helpers: when the caller is userspace (mem == evk_mem_user) go
 * through __get_user/__put_user; otherwise access memory directly.
 * Both expand to an expression usable in an if() condition. */
141 #define GET_USER_OR_KERNEL(to, from) \
144 if (mem == evk_mem_user) { \
145 err = __get_user((to), &(from)); \
153 #define PUT_USER_OR_KERNEL(value, to) \
156 if (mem == evk_mem_user) { \
157 err = __put_user((value), &(to)); \
166 /** @brief Common part of the flag structure and message queue structure */
167 #define EVK_COMMON_QUEUE_ELEMS \
168 struct list_head list; /**< List structure */ \
169 wait_queue_head_t wq_head; /**< Wait_queue of a process waiting for a queue */\
170 EV_ID queueID; /**< Flag ID/Queue ID */ \
171 UINT32 seq_num; /**< Order of event arrival */ \
172 pid_t read_pid; /**< Read process ID */ \
173 pid_t pid; /**< Owning process ID */
/* Because 'list' is the FIRST common member, a struct pointer and a
 * list_head pointer are interchangeable via cast throughout this file. */
175 /** @brief Common part of the flag structure and message queue structure */
176 struct common_queue {
177 EVK_COMMON_QUEUE_ELEMS
180 /** @brief Flag structure */
182 EVK_COMMON_QUEUE_ELEMS
183 UINT32 value;/**< Flags value */
/* Downcast helpers from common_queue to the concrete variants. */
185 #define EVK_PFLAG(queue) ((struct flag*)queue)
187 /** @brief 64-bit flag structure */
189 EVK_COMMON_QUEUE_ELEMS
190 UINT64 value;/**< Flags value */
192 #define EVK_PFLAG64(queue) ((struct flag64*)queue)
194 /** @brief Message queue structure */
195 struct message_queue {
196 EVK_COMMON_QUEUE_ELEMS
197 UINT8 type;/**< Type */
198 UINT8 length;/**< Queue length */
199 UINT8 num;/**< Number of messages stored */
200 UINT8 readptr;/**< Next read position(0~length-1) */
201 UINT32 max_bytes;/**< -Maximum bytes per message */
202 UINT8 *message;/**< Message storage area (ring buffer) */
204 #define EVK_PQUEUE(queue) ((struct message_queue*)queue)
206 /** @brief Number of bytes to allocate per message
208 * This function allocates an area to store the number of bytes actually stored, the time of occurrence,
209 * and the senderInfo, in addition to the number of bytes specified by max_bytes.
/* Slot layout: 3 UINT32 header words (stored length, senderInfo,
 * arrival sequence number) followed by the payload area. */
211 #define EVK_message_block_size(max_bytes) (sizeof(UINT32) * 3 + (max_bytes))
/*
 * Static storage: all flag/flag64/message-queue structures are allocated
 * at compile time and recycled through free-list heads; in-use entries
 * live in the queue_entry[] hash table, bucketed by ID modulo HASH_KEY.
 */
215 /** @brief Maximum number of flags used by all systems */
216 #define EVK_MAX_FLAGS 48
217 /** @brief Maximum number of 64-bit flags used by all systems */
218 #define EVK_MAX_FLAG64S 4
219 /** @brief Maximum number of message event queues used by all systems */
220 /* M1SP BM3547 MESSAGE_QUEUES 128->144 */
221 /* M9AT BM2066 MESSAGE_QUEUES 144->218 */
222 #define EVK_MAX_MESSAGE_QUEUES 224
224 /** @brief Allocate flag structure statically */
225 static struct flag _flag_pool[EVK_MAX_FLAGS];
226 /** @brief Statically allocates a 64-bit flag structure */
227 static struct flag64 _flag64_pool[EVK_MAX_FLAG64S];
228 /** @brief Beginning of the list of unused flags */
229 static LIST_HEAD(flag_pool);
230 /** @brief Beginning of the list of unused 64-bit flags */
231 static LIST_HEAD(flag64_pool);
233 /** @brief Allocate message queue structure statically */
234 static struct message_queue _message_queue_pool[EVK_MAX_MESSAGE_QUEUES];
235 /** @brief Top of the list of unused message queues */
236 static LIST_HEAD(message_queue_pool);
238 /** @brief List of Flags/Message Queues in Use
240 * Connects the usage flag/message queue to a list for each hash value obtained from the ID.
241 * The hash value is the remainder of the ID divided by HASH_KEY.
243 static struct list_head queue_entry[HASH_KEY];
245 /** @brief Sequence number to use during automatic ID assignment */
246 static EV_ID sequence_id = 0;
248 /** @brief Number to be assigned in order of occurrence of the event */
249 static UINT32 sequence_number = 0;
252 evk_get_queue_entry(struct list_head **entries)
255 *entries = queue_entry;
261 static __inline__ int
262 calc_hash(UINT32 val)
264 return val % HASH_KEY;
272 //EVK_assert(!list_empty(&flag_pool), "flag pool empty");
273 if (list_empty(&flag_pool)) {
274 printk("%s ERROR: flag pool empty\n", __func__);
277 queue = (struct flag *)flag_pool.next;
278 list_del_init(&(queue->list));
286 struct flag64 *queue;
287 //EVK_assert(!list_empty(&flag64_pool), "flag64 pool empty");
288 if (list_empty(&flag64_pool)) {
289 printk("%s ERROR: flag64 pool empty\n", __func__);
292 queue = (struct flag64 *)flag64_pool.next;
293 list_del_init(&(queue->list));
297 static __inline__ void
298 free_flag(struct flag *queue)
300 list_add((struct list_head *)queue, &flag_pool);
303 static __inline__ void
304 free_flag64(struct flag64 *queue)
306 list_add((struct list_head *)queue, &flag64_pool);
310 struct message_queue *
311 alloc_message_queue(void)
313 struct message_queue *queue;
314 //EVK_assert(!list_empty(&message_queue_pool), "message queue pool empty");
315 if (list_empty(&message_queue_pool)) {
316 printk("%s ERROR: message queue pool empty\n", __func__);
319 queue = (struct message_queue *)message_queue_pool.next;
320 list_del_init(&(queue->list));
321 queue->message = NULL;
325 static __inline__ void
326 free_message_queue(struct message_queue *queue)
328 if (queue->message != NULL) {
329 evk_free(queue->message);
330 queue->message = NULL;
332 list_add((struct list_head *)queue, &message_queue_pool);
336 struct common_queue *
337 find_queue_entry(EV_ID queueID)
339 struct list_head *list;
340 struct common_queue *queue;
341 int hash = calc_hash(queueID);
342 list = &(queue_entry[hash]);
343 list_for_each_entry(queue, list, list) { /* pgr0060 */ /* pgr0039 */
344 if (queue->queueID == queueID) {
351 static __inline__ void
352 attach_queue_entry(struct common_queue *queue, EV_ID queueID)
354 int hash = calc_hash(queueID);
355 list_add_tail((struct list_head *)queue, &(queue_entry[hash]));
/** @brief Unlink a queue from the in-use hash table (caller holds evk_mtx). */
static __inline__ void
detach_queue_entry(struct common_queue *queue)
{
	list_del_init((struct list_head *)queue);
}
/* Initialize the fields shared by flags and message queues; the creating
 * process becomes the owner (pid).  NOTE(review): at least one original
 * line between pid and the waitqueue init is truncated in this copy
 * (likely seq_num/read_pid reset) — confirm against upstream. */
364 static __inline__ void
365 init_common_queue(struct common_queue *queue, EV_ID queueID)
367 queue->queueID = queueID;
368 queue->pid = current->pid;
370 init_waitqueue_head(&(queue->wq_head));
/* Flag initializers: set up the common part.  The flag value reset is
 * presumably on the lines missing here — verify against upstream. */
373 static __inline__ void
374 evk_init_flag(struct flag *queue, EV_ID queueID)
376 init_common_queue((struct common_queue *)queue, queueID);
380 static __inline__ void
381 evk_init_flag64(struct flag64 *queue, EV_ID queueID)
383 init_common_queue((struct common_queue *)queue, queueID);
/* Message-queue initializer: records geometry and allocates the ring
 * buffer — length slots of EVK_message_block_size(max_bytes) bytes.
 * Asserts the previous buffer was freed (see free_message_queue). */
387 static __inline__ int
388 evk_init_message_queue(struct message_queue *queue, EV_ID queueID,
389 UINT8 length, UINT32 max_bytes, UINT8 type)
391 init_common_queue((struct common_queue *)queue, queueID);
393 queue->length = length;
394 queue->max_bytes = max_bytes;
397 EVK_assert(queue->message == NULL, "message buffer was not freed");
398 queue->message = evk_malloc(length * EVK_message_block_size(max_bytes));
399 EVK_assert(queue->message != NULL, "can't alloc message buffer");
/* Debug helper: walk every hash bucket and print each in-use ID together
 * with its arrival sequence number to the kernel log.  (The function
 * header is truncated in this copy.) */
406 struct list_head *list;
407 struct common_queue *queue;
409 for(i = 0 ; i < HASH_KEY ; i++) {
410 list = &(queue_entry[i]);
411 if (!list_empty(list)) {
412 printk(KERN_ALERT "%d->", i);
413 list_for_each_entry(queue, list, list) { /* pgr0060 */ /* pgr0039 */
414 printk("%x[%x] ", queue->queueID, queue->seq_num);
/* Destroy a flag/flag64/message queue: under evk_mtx, unlink it from
 * the hash table, wake any sleepers on its waitqueue so they can
 * observe the disappearance, reset the common fields to EV_INVALID_ID,
 * and return the structure to the free pool matching its ID class. */
422 evk_destroy_queue(EV_ID queueID)
424 struct common_queue *queue;
426 EVK_info1("flag destroy %x\n", queueID);
428 EVK_mutex_lock(); /*************************************/
429 queue = find_queue_entry(queueID);
435 detach_queue_entry(queue);
437 /* wake up processes before destruction */
438 wake_up_interruptible(&(queue->wq_head));
440 init_common_queue(queue, EV_INVALID_ID);
442 if (EV_ID_IS_FLAG(queueID)) {
443 free_flag((struct flag *)queue);
444 } else if (EV_ID_IS_FLAG64(queueID)) {
445 free_flag64((struct flag64 *)queue);
446 } else if (EV_ID_IS_QUEUE(queueID)) {
447 free_message_queue((struct message_queue *)queue);
451 EVK_mutex_unlock(); /*************************************/
456 evk_open(struct inode *inode, struct file *file)
458 // Recording of current and measures not to be read or deleted from others are required. */
459 file->private_data = (void *)EV_INVALID_ID;
464 evk_close(struct inode *inode, struct file *file)
466 if (EV_ID_IS_VALID((EV_ID)file->private_data)) {
467 evk_destroy_queue((EV_ID)file->private_data);
469 file->private_data = (void *)EV_INVALID_ID;
/* Create a 32-bit flag with the given ID.  Under evk_mtx: fails if the
 * ID already exists or the pool is empty (error-return lines are
 * truncated in this copy; EVK_create_flag_in below maps -EEXIST and
 * other negatives, so those are the expected codes). */
474 evk_create_flag(EV_ID queueID)
478 EVK_info1("flag create %x\n", queueID);
480 EVK_mutex_lock(); /*************************************/
482 queue = (struct flag *)find_queue_entry(queueID);
488 queue = alloc_flag();
494 evk_init_flag(queue, queueID);
495 attach_queue_entry((struct common_queue *)queue, queueID);
499 EVK_mutex_unlock(); /***********************************/
/* Create a 64-bit flag — same flow as evk_create_flag. */
504 evk_create_flag64(EV_ID queueID)
506 struct flag64 *queue;
508 EVK_info1("flag64 create %x\n", queueID);
510 EVK_mutex_lock(); /*************************************/
512 queue = (struct flag64 *)find_queue_entry(queueID);
518 queue = alloc_flag64();
524 evk_init_flag64(queue, queueID);
525 attach_queue_entry((struct common_queue *)queue, queueID);
529 EVK_mutex_unlock(); /***********************************/
/* Create a message queue: length slots of up to max_bytes each, with
 * 'type' selecting the queue-full policy (BUSY/FIFO/REPLACE — see
 * evk_store_message).  On init failure the structure goes back to the
 * pool via free_message_queue. */
534 evk_create_message_queue(EV_ID queueID, UINT8 length,
535 UINT32 max_bytes, EV_Message_Queue_Type type)
537 struct message_queue *queue;
539 EVK_info1("message create %x\n", queueID);
543 EVK_mutex_lock(); /*************************************/
545 queue = (struct message_queue *)find_queue_entry(queueID);
551 queue = alloc_message_queue();
557 err = evk_init_message_queue(queue, queueID, length, max_bytes, type);
559 attach_queue_entry((struct common_queue *)queue, queueID);
561 free_message_queue(queue);
566 EVK_mutex_unlock(); /***********************************/
/* Fragment of get_seq_id(): advances sequence_id and wraps ("rounds")
 * when it reaches the reserved bit, so auto IDs never collide with
 * reserved encodings.  (Function header truncated in this copy.) */
575 if ((sequence_id & EV_RESERVED_BIT) != 0) {/* round to 1 */
577 EVK_info0("auto ID rounded\n");
/* Auto-ID allocators: repeatedly take the next sequence ID, tag it with
 * the class bits (flag / flag64 / queue) + EV_AUTO_ID_BIT, and retry
 * until the ID is not already in use.  All under evk_mtx. */
584 evk_alloc_flagID(EV_ID *queueID)
588 EVK_mutex_lock(); /*************************************/
590 seq_id = get_seq_id();
591 seq_id |= (EV_ID_BIT | EV_FLAG_BIT | EV_AUTO_ID_BIT);
592 } while(find_queue_entry(seq_id) != NULL);
593 EVK_mutex_unlock(); /*************************************/
600 evk_alloc_flag64ID(EV_ID *queueID)
604 EVK_mutex_lock(); /*************************************/
606 seq_id = get_seq_id();
607 seq_id |= (EV_ID_BIT | EV_FLAG64_BIT | EV_AUTO_ID_BIT);
608 } while(find_queue_entry(seq_id) != NULL);
609 EVK_mutex_unlock(); /*************************************/
616 evk_alloc_queueID(EV_ID *queueID)
620 EVK_mutex_lock(); /*************************************/
622 seq_id = get_seq_id();
623 seq_id |= (EV_ID_BIT | EV_QUEUE_BIT | EV_AUTO_ID_BIT);
624 } while(find_queue_entry(seq_id) != NULL);
625 EVK_mutex_unlock(); /*************************************/
/* Set bits in a 32-bit flag.  'mem' selects user/kernel copy semantics.
 * If the flag was previously empty, its seq_num is stamped with the
 * global arrival counter (used by get_next_event ordering); then the
 * bits are OR-ed in and sleepers on the flag's waitqueue are woken. */
632 evk_store_flag(EV_Flag *ev, int mem)
639 if (GET_USER_OR_KERNEL(flagID, ev->flagID)) /* pgr0039 */
641 if (GET_USER_OR_KERNEL(bits, ev->bits)) /* pgr0039 */
644 EVK_mutex_lock(); /*************************************/
646 queue = (struct flag *)find_queue_entry(flagID); /* pgr0000 */
648 EVK_info1("set_flag: No such ID %x\n", flagID);
653 if (queue->value == 0) {
654 queue->seq_num = sequence_number++;
656 queue->value |= bits; /* pgr0000 */
658 wake_up_interruptible(&(queue->wq_head));
661 EVK_mutex_unlock(); /***********************************/
/* 64-bit variant.  The 64-bit payload is fetched with copy_from_user
 * instead of __get_user (see the commented-out macro call) — 64-bit
 * __get_user is not available on all supported architectures. */
666 evk_store_flag64(EV_Flag64 *ev, int mem)
668 struct flag64 *queue;
673 if (GET_USER_OR_KERNEL(flagID, ev->flagID)) /* pgr0039 */
675 //GET_USER_OR_KERNEL(bits, ev->bits); /* pgr0039 */
676 if (mem == evk_mem_user) {
677 if (copy_from_user(&bits, &(ev->bits), sizeof(bits)))
683 EVK_mutex_lock(); /*************************************/
685 queue = (struct flag64 *)find_queue_entry(flagID); /* pgr0000 */
687 EVK_info1("set_flag64: No such ID %x\n", flagID);
692 if (queue->value == 0) {
693 queue->seq_num = sequence_number++;
695 queue->value |= bits; /* pgr0000 */
697 wake_up_interruptible(&(queue->wq_head));
700 EVK_mutex_unlock(); /***********************************/
/* Enqueue one message.  Rejects messages longer than max_bytes.  When
 * the ring is full, behavior depends on queue->type: BUSY refuses,
 * FIFO drops the oldest (advances readptr), REPLACE overwrites the
 * newest slot.  Each slot stores length, senderInfo and an arrival
 * sequence number ahead of the payload (see EVK_message_block_size). */
705 evk_store_message(EV_Message *ev, int mem)
707 struct message_queue *queue;
712 UINT32 length, senderInfo, seq;
714 if (GET_USER_OR_KERNEL(queueID, ev->queueID)) /* pgr0039 */
716 if (GET_USER_OR_KERNEL(length, ev->length)) /* pgr0039 */
718 if (GET_USER_OR_KERNEL(senderInfo, ev->senderInfo)) /* pgr0039 */
721 EVK_mutex_lock(); /*************************************/
723 queue = (struct message_queue *)find_queue_entry(queueID); /* pgr0000 */
725 EVK_info1("store_message: No such queueID %x\n", queueID);
730 if (length > queue->max_bytes) { /* pgr0000 */
731 EVK_info0("store_message: message is too long for the queue");
736 if (queue->num == queue->length) {
738 switch(queue->type) {
739 case EV_MESSAGE_QUEUE_TYPE_BUSY:
740 EVK_info1("store_message: queue %x BUSY\n", queueID);
745 case EV_MESSAGE_QUEUE_TYPE_FIFO:
/* Full FIFO: drop the oldest entry by advancing readptr. */
747 queue->readptr %= queue->length;
751 case EV_MESSAGE_QUEUE_TYPE_REPLACE:
756 EVK_BUG("internal error in store_message\n");
/* Write slot = readptr + current count, modulo ring length. */
763 writeptr = (queue->readptr + queue->num) % queue->length;
764 ptr = queue->message + writeptr * EVK_message_block_size(queue->max_bytes);
766 memcpy(ptr, &length, sizeof(length));
767 ptr += sizeof(length);
768 memcpy(ptr, &senderInfo, sizeof(senderInfo));
769 ptr += sizeof(senderInfo);
770 seq = sequence_number++;
771 memcpy(ptr, &seq, sizeof(seq));
/* First message in an empty queue also stamps the queue's seq_num,
 * which get_next_event uses for oldest-first ordering. */
774 if (queue->num == 0) {
775 queue->seq_num = seq;
779 if (mem == evk_mem_user) {
780 if (copy_from_user(ptr, ev->message, length)) {
785 memcpy(ptr, ev->message, length);
788 wake_up_interruptible(&(queue->wq_head));
791 EVK_mutex_unlock(); /***********************************/
/* Bind a flag/queue ID to this fd for poll(2); evk_close destroys the
 * bound queue, and evk_poll reads private_data back. */
797 evk_set_poll(struct file *filp, EV_ID queueID)
799 struct common_queue *queue;
802 EVK_mutex_lock(); /*************************************/
804 queue = find_queue_entry(queueID);
806 EVK_info1("set_poll: ID %x not found.\n", queueID);
811 filp->private_data = (void *)queueID;
814 EVK_mutex_unlock(); /*************************************/
/* Fetch a 32-bit flag event.  Caller holds evk_mtx on entry.  ev->u.flag.bits
 * acts as a filter: 0 matches any set bit, otherwise the stored value
 * must intersect it.  On a match the value is written back to the caller
 * (cleared unless peek_only — clearing lines truncated in this copy).
 * With wait!=0 and no match, the lock is dropped and the task sleeps
 * interruptibly until the flag is set or destroyed, then relocks —
 * note the flag must be re-looked-up (queue2) after sleeping because
 * it may have been destroyed meanwhile. */
819 evk_get_flag_event(EV_Event *ev, int peek_only, int wait, int mem)
821 struct flag *queue, *queue2;
826 if (GET_USER_OR_KERNEL(flagID, ev->u.flag.flagID)) /* pgr0039 */
831 queue = (struct flag *)find_queue_entry(flagID); /* pgr0000 */
833 EVK_info1("get_flag: No such flag %x\n", flagID);
838 if (queue->value != 0) {
841 if (GET_USER_OR_KERNEL(bits, ev->u.flag.bits)) { /* pgr0039 */
846 if (bits == 0 || ((bits & queue->value) != 0)) { /* pgr0000 */
848 if (PUT_USER_OR_KERNEL(EV_EVENT_Flag, ev->type)) { /* pgr0039 */
852 if (PUT_USER_OR_KERNEL(queue->value, ev->u.flag.bits)) { /* pgr0039 */
858 queue->read_pid = current->pid;
868 if (queue != NULL && wait != 0 && found == 0) {
870 EVK_mutex_unlock(); /*************************************/
873 = wait_event_interruptible(queue->wq_head,
874 ((queue2 = (struct flag *)find_queue_entry(flagID)) == NULL
875 || queue2->value != 0));
877 EVK_mutex_lock(); /*************************************/
880 EVK_info1("Interrupted while waiting for flag %x\n", flagID);
882 } else if (queue2 == NULL) { /* pgr0039 */
883 EVK_info1("flag %x was destroyed while waiting for it\n", flagID);
/* 64-bit variant; identical structure, except the 64-bit filter is read
 * via copy_from_user (no portable 64-bit __get_user). */
893 evk_get_flag64_event(EV_Event *ev, int peek_only, int wait, int mem)
895 struct flag64 *queue, *queue2;
900 if (GET_USER_OR_KERNEL(flagID, ev->u.flag64.flagID)) /* pgr0039 */
905 queue = (struct flag64 *)find_queue_entry(flagID); /* pgr0000 */
907 EVK_info1("get_flag64: No such flag %x\n", flagID);
912 if (queue->value != 0) {
915 //GET_USER_OR_KERNEL(bits, ev->u.flag64.bits); /* pgr0039 */
916 if (mem == evk_mem_user) {
917 if (copy_from_user(&bits, &(ev->u.flag64.bits), sizeof(bits))) {
922 bits = ev->u.flag64.bits;
925 if (bits == 0 || ((bits & queue->value) != 0)) { /* pgr0000 */
927 if (PUT_USER_OR_KERNEL(EV_EVENT_Flag64, ev->type)) { /* pgr0039 */
931 if (PUT_USER_OR_KERNEL(queue->value, ev->u.flag64.bits)) { /* pgr0039 */
937 queue->read_pid = current->pid;
947 if (queue != NULL && wait != 0 && found == 0) {
949 EVK_mutex_unlock(); /*************************************/
952 = wait_event_interruptible(queue->wq_head,
953 ((queue2 = (struct flag64 *)find_queue_entry(flagID)) == NULL
954 || queue2->value != 0));
956 EVK_mutex_lock(); /*************************************/
959 EVK_info1("Interrupted while waiting for flag %x\n", flagID);
961 } else if (queue2 == NULL) { /* pgr0039 */
962 EVK_info1("flag %x was destroyed while waiting for it\n", flagID);
/* Remove slot 'removeptr' from the ring.  Three cases:
 *  - head: just advance readptr and restamp queue->seq_num from the new
 *    head's stored sequence word (header word #3);
 *  - tail: nothing to move;
 *  - middle: shift each following slot back by one (memcpy per slot).
 * Caller holds evk_mtx and decrements queue->num (on truncated lines). */
971 static __inline__ void
972 remove_message_event(struct message_queue *queue, UINT8 removeptr)
981 offset = (int)removeptr - (int)(queue->readptr);
983 if (offset == 0) {/* To remove the head of the queue, advance the queue by one readptr only */
985 queue->readptr %= queue->length;
987 if (queue->num > 0) {
988 /* Reset the occurrence time of the first message in the queue to the occurrence time of the queue. */
989 ptr = (queue->message
990 + queue->readptr * EVK_message_block_size(queue->max_bytes));
991 ptr += sizeof(UINT32) * 2;
992 memcpy(&(queue->seq_num), ptr, sizeof(UINT32));
/* Normalize a negative offset caused by ring wrap-around. */
997 offset += queue->length;
999 if (offset == queue->num) {/* Do nothing to delete the end of the queue */
1003 /* To delete a message in the middle of the queue, pack the following messages. */
1005 size = EVK_message_block_size(queue->max_bytes);
1007 for(i = 0 ; i < queue->num - offset ; i++, to++) {
1008 to %= queue->length;
1009 from = (to + 1) % queue->length;
1010 pFrom = queue->message + from * size;
1011 pTo = queue->message + to * size;
1012 memcpy(pTo, pFrom, size);
/* Fetch a message event.  Caller holds evk_mtx.  Scans the ring for the
 * first slot matching the caller's filter: (senderInfo==0 && length==0)
 * matches anything; a nonzero senderInfo must equal the stored one; a
 * nonzero length requests a prefix byte-compare against the caller's
 * message bytes (copied through a temp buffer for userspace callers).
 * A match is copied out and, unless peek_only, removed from the ring.
 * With wait!=0 and no match, sleeps until the queue is non-empty or
 * destroyed — re-looked-up as queue2 after sleeping. */
1017 evk_get_message_event(EV_Event *ev, int peek_only, int wait, int mem)
1019 struct message_queue *queue, *queue2;
1027 if (GET_USER_OR_KERNEL(queueID, ev->u.message.queueID)) /* pgr0039 */
1031 queue = (struct message_queue *)find_queue_entry(queueID); /* pgr0000 */
1032 if (queue == NULL) {
1033 EVK_info1("get_message: No such queue %x\n", queueID);
1039 readptr = queue->readptr;
1040 for(i = 0 ; i < num ; i++, readptr = (readptr + 1) % queue->length) {
1041 UINT32 size, senderInfo, seq;
1042 UINT32 length, q_senderInfo;
1044 ptr = (queue->message
1045 + readptr * EVK_message_block_size(queue->max_bytes));
/* Unpack the three header words ahead of the payload. */
1047 memcpy(&size, ptr, sizeof(size));
1048 ptr += sizeof(size);
1049 memcpy(&senderInfo, ptr, sizeof(senderInfo));
1050 ptr += sizeof(senderInfo);
1051 memcpy(&seq, ptr, sizeof(seq));
1054 if (GET_USER_OR_KERNEL(length, ev->u.message.length)) { /* pgr0039 */
1058 if (GET_USER_OR_KERNEL(q_senderInfo, ev->u.message.senderInfo)) { /* pgr0039 */
1063 if (q_senderInfo == 0 && length == 0) { /* pgr0000 */
1065 } else if (q_senderInfo != 0 && q_senderInfo == senderInfo) {
1067 } else if (length > 0 && size >= length) { /* pgr0000 */
1069 if (mem == evk_mem_user) {
1071 compbytes = evk_malloc(length);
1072 if (compbytes != NULL) {
1073 if (copy_from_user(compbytes, &(ev->u.message.message), length)) {
1075 evk_free(compbytes);
1078 if (memcmp(ptr, compbytes, length) == 0) {
1081 evk_free(compbytes);
1084 if (memcmp(ptr, ev->u.message.message, length) == 0) {
1092 if (PUT_USER_OR_KERNEL(EV_EVENT_Message, ev->type)) { /* pgr0039 */
1096 if (PUT_USER_OR_KERNEL(size, ev->u.message.length)) { /* pgr0039 */
1100 if (PUT_USER_OR_KERNEL(senderInfo, ev->u.message.senderInfo)) { /* pgr0039 */
1104 if (mem == evk_mem_user) {
1105 if (copy_to_user(ev->u.message.message, ptr, size)) {
1110 memcpy(ev->u.message.message, ptr, size);
1113 queue->read_pid = current->pid;
1117 remove_message_event(queue, readptr);
1125 if (queue != NULL && wait != 0 && matched == 0) {
1127 EVK_mutex_unlock(); /*************************************/
1129 = wait_event_interruptible(queue->wq_head,
1130 ((queue2 = (struct message_queue *)find_queue_entry(queueID))==NULL
1131 || queue2->num > 0));
1133 EVK_mutex_lock(); /*************************************/
1135 if (wait_ret != 0) {
1136 EVK_info1("Interrupted while waiting for queue %x\n", queueID);
1138 } else if (queue2 == NULL) { /* pgr0039 */
1139 EVK_info1("queue %x was destroyed while waiting for it\n", queueID);
/* Dispatch on the requested event class (Flag/Flag64/Message), first
 * resetting ev->type to EV_EVENT_None so the caller can detect "no
 * event".  Each class helper runs under evk_mtx taken here. */
1150 evk_get_event(EV_Event *ev, int peek_only, int wait, int mem)
1155 if (GET_USER_OR_KERNEL(type, ev->type)) /* pgr0039 */
1157 if (PUT_USER_OR_KERNEL(EV_EVENT_None, ev->type)) /* pgr0039 */
1160 switch(type) { /* pgr0000 */
1162 EVK_mutex_lock(); /*************************************/
1163 ret = evk_get_flag_event(ev, peek_only, wait, mem);
1164 EVK_mutex_unlock(); /*************************************/
1167 case EV_EVENT_Flag64:
1168 EVK_mutex_lock(); /*************************************/
1169 ret = evk_get_flag64_event(ev, peek_only, wait, mem);
1170 EVK_mutex_unlock(); /*************************************/
1173 case EV_EVENT_Message:
1174 EVK_mutex_lock(); /*************************************/
1175 ret = evk_get_message_event(ev, peek_only, wait, mem);
1176 EVK_mutex_unlock(); /*************************************/
/* Return the OLDEST pending event among a user-supplied ID set.
 * Copies up to EV_MAX_IDS_IN_PROCESS IDs from userspace into a kmalloc'd
 * scratch array, scans them for pending events, and picks the smallest
 * arrival sequence number — compared via time_before() so the ordering
 * survives 32-bit counter wrap-around.  The winning ID is written into
 * the query and fetched non-blocking via the class helper. */
1186 evk_get_next_event(EVK_Next_Event_Query *query /* user */, int peek_only)
1189 int i, num, ret, first, found;
1190 struct common_queue *queue;
1191 UINT32 seq_oldest = 0;
1193 ids = (EV_ID *)kmalloc( (sizeof(EV_ID)*EV_MAX_IDS_IN_PROCESS), GFP_KERNEL );
1198 if (__get_user(num, &(query->num))) { /* pgr0039 */
1202 if (copy_from_user(&ids[0], query->ids, num * sizeof(EV_ID))) { /* pgr0039 */
1206 if (__put_user(EV_EVENT_None, &(query->ev.type))) { /* pgr0039 */
1215 EVK_mutex_lock(); /*************************************/
1217 for(i = 0 ; i < num /* pgr0039 */ ; i++) {
1218 queue = find_queue_entry(ids[i]);
1219 if (queue != NULL) {/* Have the specified queue ID */
1220 if ((EV_ID_IS_FLAG(ids[i])
1221 && ((struct flag *)queue)->value != 0)
1222 || (EV_ID_IS_FLAG64(ids[i])
1223 && ((struct flag64 *)queue)->value != 0)
1224 || (EV_ID_IS_QUEUE(ids[i])
1225 && ((struct message_queue *)queue)->num > 0)) {/*There are events.*/
1226 /* Compare with time_before macros for round 0 */
1227 if (first || time_before((unsigned long)queue->seq_num, /* pgr0006 */ /* pgr0039 */
1228 (unsigned long)seq_oldest)) {
1230 seq_oldest = queue->seq_num;
1238 if (EV_ID_IS_FLAG(ids[found])) {
1239 if (__put_user(ids[found], &(query->ev.u.flag.flagID))) { /* pgr0039 */
1243 ret = evk_get_flag_event(&(query->ev), peek_only, 0, evk_mem_user);
1244 } else if (EV_ID_IS_FLAG64(ids[found])) {
1245 if (__put_user(ids[found], &(query->ev.u.flag64.flagID))) { /* pgr0039 */
1249 ret = evk_get_flag64_event(&(query->ev), peek_only, 0, evk_mem_user);
1250 } else if (EV_ID_IS_QUEUE(ids[found])) {
1251 if (__put_user(ids[found], &(query->ev.u.message.queueID))) { /* pgr0039 */
1255 ret = evk_get_message_event(&(query->ev), peek_only, 0, evk_mem_user);
1260 EVK_mutex_unlock(); /*************************************/
/* unlocked_ioctl handler: decodes the EVK_IOC_* command set.  'arg' is
 * either an immediate EV_ID or a userspace pointer, validated with
 * access_ok before use (VERIFY_READ/VERIFY_WRITE form — pre-4.x kernel
 * API).  PEEK/WAIT/GET variants differ only in the peek_only/wait
 * flags passed down (their assignments sit on truncated lines). */
1267 evk_ioctl(struct file *filp, unsigned int cmd,
1270 EVK_Message_Queue_Request mesq;
1271 int peek_only, wait;
1278 case EVK_IOC_CREATE_FLAG:
1279 queueID = (EV_ID)arg;
1280 ret = evk_create_flag(queueID);
1283 case EVK_IOC_CREATE_FLAG64:
1284 queueID = (EV_ID)arg;
1285 ret = evk_create_flag64(queueID);
1288 case EVK_IOC_CREATE_MESSAGE_QUEUE:
1289 if (access_ok(VERIFY_READ, arg, sizeof(mesq))) { /* pgr0039 */
/* NOTE(review): the cast below says (EV_Flag *) but the copy size and
 * destination are EVK_Message_Queue_Request — looks like a copy-paste
 * of the wrong type name; byte count is correct, cast is misleading. */
1290 if (copy_from_user(&mesq, (EV_Flag *)arg, sizeof(mesq))) {
1295 ret = evk_create_message_queue(mesq.queueID, mesq.length,
1296 mesq.max_bytes, mesq.type);
1302 case EVK_IOC_ALLOC_FLAG_ID:
1303 if (access_ok(VERIFY_WRITE, arg, sizeof(queueID))) { /* pgr0039 */
1304 evk_alloc_flagID(&queueID);
1305 if (put_user(queueID, (EV_ID *)arg)) { /* pgr0039 */
1315 case EVK_IOC_ALLOC_FLAG64_ID:
1316 if (access_ok(VERIFY_WRITE, arg, sizeof(queueID))) { /* pgr0039 */
1317 evk_alloc_flag64ID(&queueID);
1318 if (put_user(queueID, (EV_ID *)arg)) { /* pgr0039 */
1328 case EVK_IOC_ALLOC_QUEUE_ID:
1329 if (access_ok(VERIFY_WRITE, arg, sizeof(queueID))) { /* pgr0039 */
1330 evk_alloc_queueID(&queueID);
1331 if (put_user(queueID, (EV_ID *)arg)) { /* pgr0039 */
1341 case EVK_IOC_DESTROY_QUEUE:
1342 queueID = (EV_ID)arg;
1343 ret = evk_destroy_queue(queueID);
1346 case EVK_IOC_STORE_FLAG:
1347 if (access_ok(VERIFY_READ, arg, sizeof(EV_Flag))) { /* pgr0039 */
1348 ret = evk_store_flag((EV_Flag *)arg, evk_mem_user);
1354 case EVK_IOC_STORE_FLAG64:
1355 if (access_ok(VERIFY_READ, arg, sizeof(EV_Flag64))) { /* pgr0039 */
1356 ret = evk_store_flag64((EV_Flag64 *)arg, evk_mem_user);
1362 case EVK_IOC_STORE_MESSAGE:
1363 if (access_ok(VERIFY_READ, arg, sizeof(EV_Message))) { /* pgr0039 */
1364 ret = evk_store_message((EV_Message *)arg, evk_mem_user);
1370 case EVK_IOC_SET_POLL:
1371 queueID = (EV_ID)arg;
1372 ret = evk_set_poll(filp, queueID);
1375 case EVK_IOC_PEEK_EVENT:
1381 case EVK_IOC_WAIT_EVENT:
1387 case EVK_IOC_GET_EVENT:
1391 if (access_ok(VERIFY_WRITE, arg, sizeof(EV_Event))) { /* pgr0039 */
1392 ret = evk_get_event((EV_Event *)arg, peek_only, wait, evk_mem_user);
1398 case EVK_IOC_PEEK_NEXT_EVENT:
1403 case EVK_IOC_GET_NEXT_EVENT:
1406 if (access_ok(VERIFY_WRITE, arg, sizeof(EVK_Next_Event_Query))) { /* pgr0039 */
1407 ret = evk_get_next_event((EVK_Next_Event_Query *)arg, peek_only);
1413 case EVK_IOC_DEBUG_LIST:
/* poll(2) handler for the queue bound to this fd by EVK_IOC_SET_POLL.
 * Returns POLLERR|POLLHUP for an unset/invalid or vanished ID (printk
 * instead of assert — see the troubleshooting note below), otherwise
 * registers on the queue's waitqueue and reports readable when a flag
 * has bits set or a message queue is non-empty. */
1429 evk_poll(struct file *filp, poll_table *wait)
1431 unsigned int ret = 0;
1432 struct common_queue *queue;
1434 EV_ID queueID = (EV_ID)(filp->private_data);
1435 // Returns errors without stopping at assert if queueID is invalid
1436 // (Troubleshooting for Continuous Printing)
1437 if (!EV_ID_IS_VALID(queueID)) {
1438 printk("evk_poll ERROR: invalid queueID=%x\n", queueID);
1439 return POLLERR|POLLHUP;
1441 //EVK_assert(EV_ID_IS_VALID(queueID), "poll: flag/queueID not set");
1443 EVK_mutex_lock();/*****************************************/
1445 queue = find_queue_entry(queueID);
1446 if (queue == NULL) {
1447 EVK_info1("poll: No such flag/queueID %x\n", queueID);
1448 ret = POLLERR|POLLHUP;
1452 poll_wait(filp, &(queue->wq_head), wait);
1454 if (EV_ID_IS_FLAG(queueID)) {
1455 if (((struct flag *)queue)->value != 0) {
1458 } else if (EV_ID_IS_FLAG64(queueID)) {
1459 if (((struct flag64 *)queue)->value != 0) {
1463 if (((struct message_queue *)queue)->num > 0) {
1469 EVK_mutex_unlock(); /***************************************/
1474 /** List of system call corresponding function registrations */
/* NOTE(review): only .release and .unlocked_ioctl are visible here;
 * the .open/.poll members appear to be on lines truncated from this
 * copy — confirm against upstream. */
1475 static struct file_operations evk_fops = {
1477 .release = evk_close,
1478 .unlocked_ioctl = evk_ioctl,
1482 #ifdef ENABLE_PROC_FS
/* seq_file show routine shared by the three /proc/driver/ev_* entries.
 * 'mode' (evk_enum_flag / evk_enum_flag64 / evk_enum_queue) selects the
 * header and which entries to print; all buckets of the in-use hash
 * table are walked and non-matching ID classes are skipped. */
1484 evk_procFS_show(struct seq_file *m, int mode)
1487 struct list_head *list, *entries;
1488 struct common_queue *queue;
1490 seq_printf(m, "[ev library status ");
1494 seq_printf(m, "(flag)]\n");
1495 seq_printf(m, "PID moduleID flagID[hash] value\n");
1497 case evk_enum_flag64:
1498 seq_printf(m, "(flag64)]\n");
1499 seq_printf(m, "PID moduleID flagID[hash] value\n");
1501 case evk_enum_queue:
1502 seq_printf(m, "(queue)]\n");
1503 seq_printf(m, "PID moduleID queueID[hash] maxbytes remain type\n");
1509 num = evk_get_queue_entry(&entries);
1511 for (i = 0 ; i < num ; i++) {
1512 list = &(entries[i]);
1513 if (!list_empty(list)) {
1514 list_for_each_entry(queue, list, list) {
1515 if ((mode == evk_enum_flag && (!EV_ID_IS_FLAG(queue->queueID)))
1516 || (mode == evk_enum_flag64 && (!EV_ID_IS_FLAG64(queue->queueID)))
1517 || (mode == evk_enum_queue && (!EV_ID_IS_QUEUE(queue->queueID))))
/* Common columns: owner pid, module ID (bits 8-23 of the queue ID,
 * shown decimal and hex), then the full ID with its hash bucket. */
1522 seq_printf(m, "%08d ", queue->pid);
1523 seq_printf(m, "%05d(%04x) ", ((queue->queueID & 0x00ffff00) >> 8), ((queue->queueID & 0x00ffff00) >> 8));
1524 seq_printf(m, "0x%08x[%2d] ", queue->queueID, calc_hash(queue->queueID));
1528 seq_printf(m, "0x%x", EVK_PFLAG(queue)->value);
1531 case evk_enum_flag64:
1532 seq_printf(m, "0x%llx", EVK_PFLAG64(queue)->value);
1535 case evk_enum_queue:
1536 seq_printf(m, "%04d %02d ", EVK_PQUEUE(queue)->max_bytes, EVK_PQUEUE(queue)->length);
1537 seq_printf(m, "%02d ", EVK_PQUEUE(queue)->num);
1538 seq_printf(m, "%d ", EVK_PQUEUE(queue)->type);
1541 seq_printf(m, "\n");
1551 evk_procFS_flag_show(struct seq_file *m, void *v)
1553 return evk_procFS_show(m, evk_enum_flag);
1557 evk_procFS_flag64_show(struct seq_file *m, void *v)
1559 return evk_procFS_show(m, evk_enum_flag64);
1563 evk_procFS_queue_show(struct seq_file *m, void *v)
1565 return evk_procFS_show(m, evk_enum_queue);
1569 evk_procFS_flag_open(struct inode *inode, struct file *file)
1571 return single_open(file, evk_procFS_flag_show, NULL);
1575 evk_procFS_flag64_open(struct inode *inode, struct file *file)
1577 return single_open(file, evk_procFS_flag64_show, NULL);
1581 evk_procFS_queue_open(struct inode *inode, struct file *file)
1583 return single_open(file, evk_procFS_queue_show, NULL);
1586 static struct file_operations evk_proc_flag_fops = {
1587 .owner = THIS_MODULE,
1588 .open = evk_procFS_flag_open,
1590 .llseek = seq_lseek,
1591 .release = single_release,
1594 static struct file_operations evk_proc_flag64_fops = {
1595 .owner = THIS_MODULE,
1596 .open = evk_procFS_flag64_open,
1598 .llseek = seq_lseek,
1599 .release = single_release,
1602 static struct file_operations evk_proc_queue_fops = {
1603 .owner = THIS_MODULE,
1604 .open = evk_procFS_queue_open,
1606 .llseek = seq_lseek,
1607 .release = single_release,
1609 #endif /*ENABLE_PROC_FS*/
/*
 * NOTE(review): fragment of the module initialisation routine (EVK_init).
 * Its signature, local declarations (dev, err, i) and the error-handling
 * branches after each failing call are in elided lines — comments only
 * are added here; confirm the goto/unwind paths against the full source.
 */
1615 #ifdef CONFIG_PROC_FS
1616 #ifdef ENABLE_PROC_FS
1617 struct proc_dir_entry *ret;
1618 #endif /* ENABLE_PROC_FS */
1619 #endif /* CONFIG_PROC_FS */
/* Reserve the char-device region for this driver. */
1623 dev = MKDEV(devmajor, devminor);
1624 err = register_chrdev_region(dev, nrdevs, EVK_NAME);
1626 EVK_info1("register_chrdev_region error %d\n", -err);
/* Register the character device backed by evk_fops. */
1630 cdev_init(&cdev, &evk_fops);
1631 cdev.owner = THIS_MODULE;
1632 cdev.ops = &evk_fops;
1634 err = cdev_add(&cdev, dev, 1);
1636 EVK_info1("cdev_add error %d\n", -err);
1640 /* Initialization */
/* Seed the free-object pools: every static flag/flag64/queue slot is
 * linked onto its pool list. */
1641 for(i = 0 ; i < EVK_MAX_FLAGS ; i++) {
1642 list_add_tail((struct list_head *)&(_flag_pool[i]),
1645 for(i = 0 ; i < EVK_MAX_FLAG64S ; i++) {
1646 list_add_tail((struct list_head *)&(_flag64_pool[i]),
1649 for(i = 0 ; i < EVK_MAX_MESSAGE_QUEUES ; i++) {
1650 list_add_tail((struct list_head *)&(_message_queue_pool[i]),
1651 &message_queue_pool);
/* Empty every bucket of the queue-ID hash table. */
1653 for(i = 0 ; i < HASH_KEY ; i++) {
1654 INIT_LIST_HEAD(&(queue_entry[i]));
/* Create the three /proc/driver/ev_* debug entries. */
1657 #ifdef CONFIG_PROC_FS
1658 #ifdef ENABLE_PROC_FS
1659 ret = proc_create("driver/ev_flag", 0, NULL, &evk_proc_flag_fops);
1661 EVK_info1("Unable to initialize /proc entry %d\n", -err);
1665 ret = proc_create("driver/ev_flag64", 0, NULL, &evk_proc_flag64_fops);
1667 EVK_info1("Unable to initialize /proc entry %d\n", -err);
1671 ret = proc_create("driver/ev_queue", 0, NULL, &evk_proc_queue_fops);
1673 EVK_info1("Unable to initialize /proc entry %d\n", -err);
1676 #endif /* ENABLE_PROC_FS */
1677 #endif /* CONFIG_PROC_FS */
/* Publish the device node (/dev/agl/evk_...) via the device class. */
1679 pClass = class_create(THIS_MODULE, EVK_NAME);
1680 device_create(pClass, NULL, dev, NULL, "agl/"EVK_NAME);
1686 //#ifndef CONFIG_COMBINE_MODULES
/*
 * NOTE(review): fragment of the module teardown routine (EVK_exit); its
 * signature and some lines (e.g. cdev_del) are elided. Teardown mirrors
 * EVK_init: device node, class, chrdev region, then /proc entries.
 */
1691 dev_t dev = MKDEV(devmajor, devminor);
1692 device_destroy(pClass, dev);
1693 class_destroy(pClass);
1697 unregister_chrdev_region(dev, nrdevs);
/* Remove the debug entries created in EVK_init. */
1699 remove_proc_entry( "driver/ev_flag", 0 );
1700 remove_proc_entry( "driver/ev_flag64", 0 );
1701 remove_proc_entry( "driver/ev_queue", 0 );
1708 EVK_create_flag_in(EV_ID flagID)
1710 int ret = evk_create_flag(flagID);
1712 if (ret == -EEXIST) {
1713 return EV_ERR_Exist;
1714 } else if (ret < 0) {
1715 return EV_ERR_Fatal;
1722 EVK_create_flag64_in(EV_ID flagID)
1724 int ret = evk_create_flag64(flagID);
1726 if (ret == -EEXIST) {
1727 return EV_ERR_Exist;
1728 } else if (ret < 0) {
1729 return EV_ERR_Fatal;
1735 /** @see EV_create_flag */
1737 EVK_create_flag(EV_ID flagID)
1739 if (!EV_ID_IS_FLAG(flagID) || EV_ID_IS_AUTO_ID(flagID)) {
1740 return EV_ERR_Invalid_ID;
1742 return EVK_create_flag_in(flagID);
1745 /** @see EV_create_flag64 */
1747 EVK_create_flag64(EV_ID flagID)
1749 if (!EV_ID_IS_FLAG64(flagID) || EV_ID_IS_AUTO_ID(flagID)) {
1750 return EV_ERR_Invalid_ID;
1752 return EVK_create_flag64_in(flagID);
1756 EVK_create_queue_in(EV_ID queueID, UINT8 length, UINT16 max_bytes,
1757 EV_Message_Queue_Type type)
1759 int ret = evk_create_message_queue(queueID, length, max_bytes, type);
1761 if (ret == -EEXIST) {
1762 return EV_ERR_Exist;
1763 } else if (ret < 0) {
1764 return EV_ERR_Fatal;
1770 /** @see EV_create_queue */
1772 EVK_create_queue(EV_ID queueID, UINT8 length, UINT16 max_bytes,
1773 EV_Message_Queue_Type type)
1775 if (!EV_ID_IS_QUEUE(queueID) || EV_ID_IS_AUTO_ID(queueID)) {
1776 return EV_ERR_Invalid_ID;
1778 return EVK_create_queue_in(queueID, length, max_bytes, type);
1781 /** @see EV_create_flag_auto_id */
1783 EVK_create_flag_auto_id(/* OUT */EV_ID *flagID)
1786 EVK_assert(flagID != NULL, "NULL pointer was specified");
1788 if (evk_alloc_flagID(flagID) < 0) {
1789 return EV_ERR_Fatal;
1792 err = EVK_create_flag_in(*flagID);
1799 /** @see EV_create_flag64_auto_id */
1801 EVK_create_flag64_auto_id(/* OUT */EV_ID *flagID)
1804 EVK_assert(flagID != NULL, "NULL pointer was specified");
1806 if (evk_alloc_flag64ID(flagID) < 0) {
1807 return EV_ERR_Fatal;
1810 err = EVK_create_flag64_in(*flagID);
1817 /** @see EV_create_queue_auto_id */
1819 EVK_create_queue_auto_id(/* OUT */EV_ID *queueID, UINT8 length,
1820 UINT16 max_bytes, EV_Message_Queue_Type type)
1823 EVK_assert(queueID != NULL, "NULL pointer was specified");
1825 if (evk_alloc_queueID(queueID) < 0) {
1826 return EV_ERR_Fatal;
1829 err = EVK_create_queue_in(*queueID, length, max_bytes, type);
1831 *queueID = EV_NO_ID;
1836 /** @see EV_destroy_flag */
1838 EVK_destroy_flag(EV_ID flagID)
1841 err = evk_destroy_queue(flagID);
1843 if (err == -ENOENT) {
1844 return EV_ERR_Invalid_ID;
1845 } else if (err < 0) {
1846 return EV_ERR_Fatal;
1852 /** @see EV_destroy_queue */
1854 EVK_destroy_queue(EV_ID queueID)
1856 return EVK_destroy_flag(queueID);
1859 /* Sending the event */
1860 /** @see EV_set_flag */
1862 EVK_set_flag(EV_ID flagID, UINT32 bits)
1867 if (!EV_ID_IS_FLAG(flagID)) {
1868 return EV_ERR_Invalid_ID;
1870 flag.flagID = flagID;
1873 ret = evk_store_flag(&flag, evk_mem_kernel);
1874 if (ret == -ENOENT) {
1875 return EV_ERR_Invalid_ID;
1876 } else if (ret < 0) {
1877 return EV_ERR_Fatal;
1883 /** @see EV_set_flag64 */
1885 EVK_set_flag64(EV_ID flagID, UINT64 bits)
1890 if (!EV_ID_IS_FLAG64(flagID)) {
1891 return EV_ERR_Invalid_ID;
1893 flag.flagID = flagID;
1896 ret = evk_store_flag64(&flag, evk_mem_kernel);
1897 if (ret == -ENOENT) {
1898 return EV_ERR_Invalid_ID;
1899 } else if (ret < 0) {
1900 return EV_ERR_Fatal;
/*
 * NOTE(review): fragment of EVK_send_message() — the allocation-failure
 * branch, goto/cleanup labels, the success assignment and the evk_free()
 * of `msg` are in elided lines. Comments only added here; verify the
 * unwind path (msg is heap-allocated and must be freed) in full source.
 */
1906 /** @see EV_send_message */
1908 EVK_send_message(EV_ID queueID, UINT16 bytes, const void *message,
/* Heap-allocate the envelope; EV_Message is too large for the stack. */
1911 EV_Message *msg = NULL;
1915 msg = evk_malloc( sizeof( EV_Message ) );
/* Allocation failed (condition in elided line): fatal error. */
1918 ev_ret = EV_ERR_Fatal;
1922 if (!EV_ID_IS_QUEUE(queueID)) {
1923 ev_ret = EV_ERR_Invalid_ID;
1926 EVK_assert(message != NULL, "NULL pointer was specified");
1927 EVK_assert(bytes <= EV_MAX_MESSAGE_LENGTH, "send_message: message too long");
/* Fill in destination queue, sender tag, and copy the payload. */
1929 msg->queueID = queueID;
1930 msg->senderInfo = senderInfo;
1931 msg->length = bytes;
1932 memcpy(msg->message, message, bytes);
1934 ret = evk_store_message(msg, evk_mem_kernel);
/* Map errno-style results from the store onto EV_ERR codes. */
1935 if (ret == -ENOENT) {
1936 ev_ret = EV_ERR_Invalid_ID;
1937 } else if (ret == -EBUSY) {
1938 ev_ret = EV_ERR_Busy;
1939 } else if (ret < 0) {
1940 ev_ret = EV_ERR_Fatal;
1953 /* Event acquisition(Order of arrival time) */
1954 //EV_ERR EV_get_next_event(/* OUT */EV_Event *ev);
/*
 * NOTE(review): fragment of EVK_get_flag_in() — the common worker behind
 * get/wait/peek_flag (peek_only and wait select the variant). Return
 * type, locals, goto/cleanup and the evk_free() of `ev` are in elided
 * lines; comments only added.
 */
1957 EVK_get_flag_in(EV_ID flagID, EV_Flag *flag, int peek_only, int wait)
1959 EV_Event *ev = NULL;
1963 ev = evk_malloc( sizeof( EV_Event ) );
/* Allocation failed (condition elided): fatal error. */
1966 ev_ret = EV_ERR_Fatal;
1970 EVK_assert(flag != NULL, "get_flag: NULL pointer was specified");
/* Default the OUT param to "no event" before attempting the fetch. */
1971 flag->flagID = EV_NO_ID;
1974 if (!EV_ID_IS_FLAG(flagID)) {
1975 ev_ret = EV_ERR_Invalid_ID;
/* Build the query event for evk_get_event(). */
1979 ev->type = EV_EVENT_Flag;
1980 ev->u.flag.flagID = flagID;
1981 ev->u.flag.bits = 0;
1983 ret = evk_get_event(ev, peek_only, wait, evk_mem_kernel);
/* Map errno-style results onto EV_ERR codes. */
1986 if (ret == -ENOENT) {
1987 ev_ret = EV_ERR_Invalid_ID;
1988 } else if (ret == -EINTR) {
1989 ev_ret = EV_ERR_Interrupted;
1991 ev_ret = EV_ERR_Fatal;
/* On success, copy the retrieved flag back to the caller. */
1996 if (ev->type == EV_EVENT_Flag) {
1997 flag->flagID = ev->u.flag.flagID;
1998 flag->bits = ev->u.flag.bits;
/*
 * NOTE(review): fragment of EVK_get_flag64_in() — 64-bit twin of
 * EVK_get_flag_in(); same elisions (return type, locals, cleanup of the
 * heap-allocated `ev`). Comments only added.
 */
2011 EVK_get_flag64_in(EV_ID flagID, EV_Flag64 *flag, int peek_only, int wait)
2013 EV_Event *ev = NULL;
2017 ev = evk_malloc( sizeof( EV_Event ) );
/* Allocation failed (condition elided): fatal error. */
2020 ev_ret = EV_ERR_Fatal;
2024 EVK_assert(flag != NULL, "get_flag64: NULL pointer was specified");
/* Default the OUT param to "no event". */
2025 flag->flagID = EV_NO_ID;
2028 if (!EV_ID_IS_FLAG64(flagID)) {
2029 ev_ret = EV_ERR_Invalid_ID;
/* Build the 64-bit flag query for evk_get_event(). */
2033 ev->type = EV_EVENT_Flag64;
2034 ev->u.flag64.flagID = flagID;
2035 ev->u.flag64.bits = 0;
2037 ret = evk_get_event(ev, peek_only, wait, evk_mem_kernel);
/* Map errno-style results onto EV_ERR codes. */
2040 if (ret == -ENOENT) {
2041 ev_ret = EV_ERR_Invalid_ID;
2042 } else if (ret == -EINTR) {
2043 ev_ret = EV_ERR_Interrupted;
2045 ev_ret = EV_ERR_Fatal;
/* On success, hand the retrieved flag back to the caller. */
2050 if (ev->type == EV_EVENT_Flag64) {
2051 flag->flagID = ev->u.flag64.flagID;
2052 flag->bits = ev->u.flag64.bits;
2064 /* Event acquisition(With Search Criteria) */
2065 /** @see EV_get_flag */
2067 EVK_get_flag(EV_ID flagID, /* OUT */EV_Flag *flag)
2069 return EVK_get_flag_in(flagID, flag, 0, 0);
2072 /** @see EV_get_flag64 */
2074 EVK_get_flag64(EV_ID flagID, /* OUT */EV_Flag64 *flag)
2076 return EVK_get_flag64_in(flagID, flag, 0, 0);
2079 /** @see EV_wait_flag */
2081 EVK_wait_flag(EV_ID flagID, /* OUT */EV_Flag *flag)/* block */
2083 return EVK_get_flag_in(flagID, flag, 0, 1);
2086 /** @see EV_wait_flag64 */
2088 EVK_wait_flag64(EV_ID flagID, /* OUT */EV_Flag64 *flag)/* block */
2090 return EVK_get_flag64_in(flagID, flag, 0, 1);
2093 /** @see EV_peek_flag */
2095 EVK_peek_flag(EV_ID flagID, /* OUT */EV_Flag *flag)
2097 return EVK_get_flag_in(flagID, flag, 1, 0);
2100 /** @see EV_peek_flag64 */
2102 EVK_peek_flag64(EV_ID flagID, /* OUT */EV_Flag64 *flag)
2104 return EVK_get_flag64_in(flagID, flag, 1, 0);
/*
 * NOTE(review): fragment of EVK_get_message_in() — the common worker
 * behind get/wait/peek_message. When compare_bytes is non-NULL it doubles
 * as a content-match query. Return type, locals, goto/cleanup and the
 * evk_free() of `ev` are in elided lines; comments only added.
 */
2108 EVK_get_message_in(EV_ID queueID, EV_Message *message, UINT32 senderInfo,
2109 UINT32 length, const void *compare_bytes,
2110 int peek_only, int wait)
2112 EV_Event *ev = NULL;
2116 ev = evk_malloc( sizeof( EV_Event ) );
/* Allocation failed (condition elided): fatal error. */
2119 ev_ret = EV_ERR_Fatal;
2123 EVK_assert(message != NULL, "get_message: NULL pointer was specified");
2124 if (!EV_ID_IS_QUEUE(queueID)) {
2125 ev_ret = EV_ERR_Invalid_ID;
/* Default the OUT param to "no message" before the fetch. */
2128 message->queueID = EV_NO_ID;
2129 message->senderInfo = 0;
2130 message->length = 0;
/* Build the query event; optional sender/content match criteria. */
2132 ev->type = EV_EVENT_Message;
2133 ev->u.message.queueID = queueID;
2134 ev->u.message.senderInfo = senderInfo;
2135 if (compare_bytes != NULL) {
2136 ev->u.message.length = length;
2137 memcpy(ev->u.message.message, compare_bytes, length);
2139 ev->u.message.length = 0;
2142 ret = evk_get_event(ev, peek_only, wait, evk_mem_kernel);
/* Map errno-style results onto EV_ERR codes. */
2145 if (ret == -ENOENT) {
2146 ev_ret = EV_ERR_Invalid_ID;
2147 } else if (ret == -EINTR) {
2148 ev_ret = EV_ERR_Interrupted;
2150 ev_ret = EV_ERR_Fatal;
/* On success, copy the retrieved message back to the caller. */
2155 if (ev->type == EV_EVENT_Message) {
2156 message->queueID = ev->u.message.queueID;
2157 message->senderInfo = ev->u.message.senderInfo;
2158 message->length = ev->u.message.length;
2159 memcpy(message->message, ev->u.message.message, ev->u.message.length);
2171 /** @see EV_get_message */
2173 EVK_get_message(EV_ID queueID, /* OUT */EV_Message *message)
2175 return EVK_get_message_in(queueID, message, 0, 0, NULL, 0, 0);
2178 /** @see EV_wait_message */
2180 EVK_wait_message(EV_ID queueID, /* OUT */EV_Message *message)/* block */
2182 return EVK_get_message_in(queueID, message, 0, 0, NULL, 0, 1);
2185 /** @see EV_peek_message */
2187 EVK_peek_message(EV_ID queueID, /* OUT */EV_Message *message)
2189 return EVK_get_message_in(queueID, message, 0, 0, NULL, 1, 0);
2192 /** @see EV_find_message_by_sender */
2194 EVK_find_message_by_sender(EV_ID queueID, UINT32 senderInfo,
2195 /* OUT */EV_Message *message)
2198 return EV_ERR_Fatal;
2201 /** @see EV_find_message_by_content */
2203 EVK_find_message_by_content(EV_ID queueID, UINT16 length,
2204 const void *compare_bytes,
2205 /* OUT */EV_Message *message)
2208 return EV_ERR_Fatal;
/* Symbols exported to other kernel modules (public API of the EV
 * kernel-space event library; declarations in agldd/evk_lib.h). */
EXPORT_SYMBOL(evk_get_queue_entry);
EXPORT_SYMBOL(EVK_init);
EXPORT_SYMBOL(EVK_exit);

EXPORT_SYMBOL(EVK_create_flag);
EXPORT_SYMBOL(EVK_create_flag64);
EXPORT_SYMBOL(EVK_create_queue);
EXPORT_SYMBOL(EVK_create_flag_auto_id);
EXPORT_SYMBOL(EVK_create_flag64_auto_id);
EXPORT_SYMBOL(EVK_create_queue_auto_id);
EXPORT_SYMBOL(EVK_destroy_flag);
EXPORT_SYMBOL(EVK_destroy_queue);
EXPORT_SYMBOL(EVK_set_flag);
EXPORT_SYMBOL(EVK_set_flag64);
EXPORT_SYMBOL(EVK_send_message);
EXPORT_SYMBOL(EVK_get_flag);
EXPORT_SYMBOL(EVK_wait_flag);
EXPORT_SYMBOL(EVK_peek_flag);
EXPORT_SYMBOL(EVK_get_flag64);
EXPORT_SYMBOL(EVK_wait_flag64);
EXPORT_SYMBOL(EVK_peek_flag64);
EXPORT_SYMBOL(EVK_get_message);
EXPORT_SYMBOL(EVK_wait_message);
EXPORT_SYMBOL(EVK_peek_message);
2237 #ifndef CONFIG_COMBINE_MODULES
2238 //MODULE_LICENSE("proprietary");
2239 MODULE_LICENSE("GPL");
2240 MODULE_DESCRIPTION("EVent library for Kernel");
2241 //MODULE_SUPPORTED_DEVICE(name);
2242 //MODULE_PARM(var,type)
2243 //MODULE_PARM_DESC(var,desc)
2244 module_init(EVK_init);
2245 module_exit(EVK_exit);
2246 #endif /* !CONFIG_COMBINE_MODULES */