2 * drivers/agl/evk_lib.c
4 * Event library (kernel space part)
6 * @copyright Copyright (c) 2016-2019 TOYOTA MOTOR CORPORATION.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License published
10 * by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
22 #include <generated/autoconf.h>
23 #include <linux/module.h>
24 #include <linux/init.h>
25 #include <linux/version.h>
26 #include <linux/kernel.h>
27 #include <linux/slab.h>
28 #include <linux/sched.h>
29 #include <linux/wait.h>
30 #include <linux/poll.h>
31 #include <linux/list.h>
32 #include <asm/uaccess.h>
33 #include <linux/errno.h>
34 #include <linux/vmalloc.h>
36 #include <linux/proc_fs.h>
37 #include <linux/seq_file.h>
39 #include <linux/device.h>
40 #include <linux/cdev.h>
42 #ifndef STANDARD_INT_DEFINITIONS
43 #define STANDARD_INT_DEFINITIONS
52 #endif /* !STANDARD_INT_DEFINITIONS */
54 #define ENABLE_PROC_FS 1
56 #include <agldd/evk_lib.h>
58 /** @addtogroup EV_in */
60 /** In Linux2.4, list_for_each_entry is not provided, so it is prepared by self (in 2.6)
62 #ifdef list_for_each_entry
63 #define __LINUX_26_OR_HIGHER
66 #ifndef __LINUX_26_OR_HIGHER /* linux v2.4 */
68 #define list_for_each_entry(pos, head, member) \
69 for (pos = list_entry((head)->next, typeof(*pos), member), \
70 prefetch(pos->member.next); \
71 &pos->member != (head); \
72 pos = list_entry(pos->member.next, typeof(*pos), member), \
73 prefetch(pos->member.next))
75 #define list_for_each_entry_safe(pos, n, head, member) \
76 for (pos = list_entry((head)->next, typeof(*pos), member), \
77 n = list_entry(pos->member.next, typeof(*pos), member); \
78 &pos->member != (head); \
79 pos = n, n = list_entry(n->member.next, typeof(*n), member))
81 #else /* linux v2.6 */
83 #include <linux/jiffies.h>
85 #endif /* linux v2.6 */
87 #define EVK_assert(cond, mesg) \
89 printk(KERN_ALERT "[EVK]ASSERT(pid:%d): " #cond " at %s:%d; " \
90 #mesg "\n", current->pid, __FILE__, __LINE__); \
94 #define EVK_BUG(mesg) \
95 printk(KERN_ALERT "[EVK]BUG: " mesg); \
99 #define EVK_info0(s) printk(KERN_ALERT "[EVK]INFO: " s)
100 #define EVK_info1(s, t) printk(KERN_ALERT "[EVK]INFO: " s, t)
103 #define EVK_info1(s, t)
106 static int devmajor = EVK_DEV_MAJOR;
107 static int devminor = 0;
108 static int nrdevs = 1;
109 static struct cdev cdev;
110 static struct class *pClass;
112 DEFINE_SEMAPHORE(evk_mtx);
114 static int down_line;
115 #define EVK_mutex_lock() { \
117 down_pid = current->pid; \
118 down_line = __LINE__; \
120 #define EVK_mutex_unlock() (up(&evk_mtx))
122 #ifdef EVK_USE_KMALLOC
123 #define evk_malloc(s) kmalloc((s), GFP_KERNEL)
124 #define evk_free kfree
125 #else // use vmalloc (this is the default)
126 #define evk_malloc(s) vmalloc((s))
127 #define evk_free vfree
141 #define GET_USER_OR_KERNEL(to, from) \
144 if (mem == evk_mem_user) { \
145 err = __get_user((to), &(from)); \
153 #define PUT_USER_OR_KERNEL(value, to) \
156 if (mem == evk_mem_user) { \
157 err = __put_user((value), &(to)); \
166 /** @brief Common part of the flag structure and message queue structure */
167 #define EVK_COMMON_QUEUE_ELEMS \
168 struct list_head list; /**< List structure */ \
169 wait_queue_head_t wq_head; /**< Wait_queue of a process waiting for a queue */\
170 EV_ID queueID; /**< Flag ID/Queue ID */ \
171 UINT32 seq_num; /**< Order of event arrival */ \
172 pid_t read_pid; /**< Read process ID */ \
173 pid_t pid; /**< Owning process ID */
175 /** @brief Common part of the flag structure and message queue structure */
176 struct common_queue {
177 EVK_COMMON_QUEUE_ELEMS
180 /** @brief Flag structure */
182 EVK_COMMON_QUEUE_ELEMS
183 UINT32 value;/**< Flags value */
185 #define EVK_PFLAG(queue) ((struct flag*)queue)
187 /** @brief 64-bit flag structure */
189 EVK_COMMON_QUEUE_ELEMS
190 UINT64 value;/**< Flags value */
192 #define EVK_PFLAG64(queue) ((struct flag64*)queue)
194 /** @brief Message queue structure */
195 struct message_queue {
196 EVK_COMMON_QUEUE_ELEMS
197 UINT8 type;/**< Type */
198 UINT8 length;/**< Queue length */
199 UINT8 num;/**< Number of messages stored */
200 UINT8 readptr;/**< Next read position(0~length-1) */
201 UINT32 max_bytes;/**< -Maximum bytes per message */
202 UINT8 *message;/**< Message storage area (ring buffer) */
204 #define EVK_PQUEUE(queue) ((struct message_queue*)queue)
206 /** @brief Number of bytes to allocate per message
208 * This function allocates an area to store the number of bytes actually stored, the time of occurrence,
209 * and the senderInfo, in addition to the number of bytes specified by max_bytes.
211 #define EVK_message_block_size(max_bytes) (sizeof(UINT32) * 3 + (max_bytes))
215 /** @brief Maximum number of flags used by all systems */
216 #define EVK_MAX_FLAGS 48
217 /** @brief Maximum number of 64-bit flags used by all systems */
218 #define EVK_MAX_FLAG64S 4
219 /** @brief Maximum number of message event queues used by all systems */
220 /* M1SP BM3547 MESSAGE_QUEUES 128->144 */
221 /* M9AT BM2066 MESSAGE_QUEUES 144->218 */
222 #define EVK_MAX_MESSAGE_QUEUES 224
224 /** @brief Allocate flag structure statically */
225 static struct flag _flag_pool[EVK_MAX_FLAGS];
226 /** @brief Statically allocates a 64-bit flag structure */
227 static struct flag64 _flag64_pool[EVK_MAX_FLAG64S];
228 /** @brief Beginning of the list of unused flags */
229 static LIST_HEAD(flag_pool);
230 /** @brief Beginning of the list of unused 64-bit flags */
231 static LIST_HEAD(flag64_pool);
233 /** @brief Allocate message queue structure statically */
234 static struct message_queue _message_queue_pool[EVK_MAX_MESSAGE_QUEUES];
235 /** @brief Top of the list of unused message queues */
236 static LIST_HEAD(message_queue_pool);
238 /** @brief List of Flags/Message Queues in Use
240 * Connects the usage flag/message queue to a list for each hash value obtained from the ID.
241 * The hash value is the remainder of the ID divided by HASH_KEY.
243 static struct list_head queue_entry[HASH_KEY];
245 /** @brief Sequence number to use during automatic ID assignment */
246 static EV_ID sequence_id = 0;
248 /** @brief Number to be assigned in order of occurrence of the event */
249 static UINT32 sequence_number = 0;
252 evk_get_queue_entry(struct list_head **entries)
255 *entries = queue_entry;
261 static __inline__ int
262 calc_hash(UINT32 val)
264 return val % HASH_KEY;
272 //EVK_assert(!list_empty(&flag_pool), "flag pool empty");
273 if (list_empty(&flag_pool)) {
274 printk("%s ERROR: flag pool empty\n", __func__);
277 queue = (struct flag *)flag_pool.next;
278 list_del_init(&(queue->list));
286 struct flag64 *queue;
287 //EVK_assert(!list_empty(&flag64_pool), "flag64 pool empty");
288 if (list_empty(&flag64_pool)) {
289 printk("%s ERROR: flag64 pool empty\n", __func__);
292 queue = (struct flag64 *)flag64_pool.next;
293 list_del_init(&(queue->list));
297 static __inline__ void
298 free_flag(struct flag *queue)
300 list_add((struct list_head *)queue, &flag_pool);
303 static __inline__ void
304 free_flag64(struct flag64 *queue)
306 list_add((struct list_head *)queue, &flag64_pool);
310 struct message_queue *
311 alloc_message_queue(void)
313 struct message_queue *queue;
314 //EVK_assert(!list_empty(&message_queue_pool), "message queue pool empty");
315 if (list_empty(&message_queue_pool)) {
316 printk("%s ERROR: message queue pool empty\n", __func__);
319 queue = (struct message_queue *)message_queue_pool.next;
320 list_del_init(&(queue->list));
321 queue->message = NULL;
325 static __inline__ void
326 free_message_queue(struct message_queue *queue)
328 if (queue->message != NULL) {
329 evk_free(queue->message);
330 queue->message = NULL;
332 list_add((struct list_head *)queue, &message_queue_pool);
336 struct common_queue *
337 find_queue_entry(EV_ID queueID)
339 struct list_head *list;
340 struct common_queue *queue;
341 int hash = calc_hash(queueID);
342 list = &(queue_entry[hash]);
343 list_for_each_entry(queue, list, list) { /* pgr0060 */ /* pgr0039 */
344 if (queue->queueID == queueID) {
351 static __inline__ void
352 attach_queue_entry(struct common_queue *queue, EV_ID queueID)
354 int hash = calc_hash(queueID);
355 list_add_tail((struct list_head *)queue, &(queue_entry[hash]));
/**
 * @brief Remove a flag/queue from the in-use hash table.
 *
 * Caller must hold evk_mtx.
 */
static __inline__ void
detach_queue_entry(struct common_queue *queue)
{
	list_del_init((struct list_head *)queue);
}
364 static __inline__ void
365 init_common_queue(struct common_queue *queue, EV_ID queueID)
367 queue->queueID = queueID;
368 queue->pid = current->pid;
370 init_waitqueue_head(&(queue->wq_head));
373 static __inline__ void
374 evk_init_flag(struct flag *queue, EV_ID queueID)
376 init_common_queue((struct common_queue *)queue, queueID);
380 static __inline__ void
381 evk_init_flag64(struct flag64 *queue, EV_ID queueID)
383 init_common_queue((struct common_queue *)queue, queueID);
387 static __inline__ int
388 evk_init_message_queue(struct message_queue *queue, EV_ID queueID,
389 UINT8 length, UINT32 max_bytes, UINT8 type)
391 init_common_queue((struct common_queue *)queue, queueID);
393 queue->length = length;
394 queue->max_bytes = max_bytes;
397 EVK_assert(queue->message == NULL, "message buffer was not freed");
398 queue->message = evk_malloc(length * EVK_message_block_size(max_bytes));
399 EVK_assert(queue->message != NULL, "can't alloc message buffer");
406 struct list_head *list;
407 struct common_queue *queue;
409 for(i = 0 ; i < HASH_KEY ; i++) {
410 list = &(queue_entry[i]);
411 if (!list_empty(list)) {
412 printk(KERN_ALERT "%d->", i);
413 list_for_each_entry(queue, list, list) { /* pgr0060 */ /* pgr0039 */
414 printk("%x[%x] ", queue->queueID, queue->seq_num);
422 evk_destroy_queue(EV_ID queueID)
424 struct common_queue *queue;
426 EVK_info1("flag destroy %x\n", queueID);
428 EVK_mutex_lock(); /*************************************/
429 queue = find_queue_entry(queueID);
435 detach_queue_entry(queue);
437 /* wake up processes before destruction */
438 wake_up_interruptible(&(queue->wq_head));
440 init_common_queue(queue, EV_INVALID_ID);
442 if (EV_ID_IS_FLAG(queueID)) {
443 free_flag((struct flag *)queue);
444 } else if (EV_ID_IS_FLAG64(queueID)) {
445 free_flag64((struct flag64 *)queue);
446 } else if (EV_ID_IS_QUEUE(queueID)) {
447 free_message_queue((struct message_queue *)queue);
451 EVK_mutex_unlock(); /*************************************/
456 evk_open(struct inode *inode, struct file *file)
458 // Recording of current and measures not to be read or deleted from others are required. */
459 file->private_data = (void *)EV_INVALID_ID;
464 evk_close(struct inode *inode, struct file *file)
466 if (EV_ID_IS_VALID((EV_ID)file->private_data)) {
467 evk_destroy_queue((EV_ID)file->private_data);
469 file->private_data = (void *)EV_INVALID_ID;
474 evk_create_flag(EV_ID queueID)
478 EVK_info1("flag create %x\n", queueID);
480 EVK_mutex_lock(); /*************************************/
482 queue = (struct flag *)find_queue_entry(queueID);
488 queue = alloc_flag();
494 evk_init_flag(queue, queueID);
495 attach_queue_entry((struct common_queue *)queue, queueID);
499 EVK_mutex_unlock(); /***********************************/
504 evk_create_flag64(EV_ID queueID)
506 struct flag64 *queue;
508 EVK_info1("flag64 create %x\n", queueID);
510 EVK_mutex_lock(); /*************************************/
512 queue = (struct flag64 *)find_queue_entry(queueID);
518 queue = alloc_flag64();
524 evk_init_flag64(queue, queueID);
525 attach_queue_entry((struct common_queue *)queue, queueID);
529 EVK_mutex_unlock(); /***********************************/
534 evk_create_message_queue(EV_ID queueID, UINT8 length,
535 UINT32 max_bytes, EV_Message_Queue_Type type)
537 struct message_queue *queue;
539 EVK_info1("message create %x\n", queueID);
543 EVK_mutex_lock(); /*************************************/
545 queue = (struct message_queue *)find_queue_entry(queueID);
551 queue = alloc_message_queue();
557 err = evk_init_message_queue(queue, queueID, length, max_bytes, type);
559 attach_queue_entry((struct common_queue *)queue, queueID);
561 free_message_queue(queue);
566 EVK_mutex_unlock(); /***********************************/
575 if ((sequence_id & EV_RESERVED_BIT) != 0) {/* round to 1 */
577 EVK_info0("auto ID rounded\n");
584 evk_alloc_flagID(EV_ID *queueID)
588 EVK_mutex_lock(); /*************************************/
590 seq_id = get_seq_id();
591 seq_id |= (EV_ID_BIT | EV_FLAG_BIT | EV_AUTO_ID_BIT);
592 } while(find_queue_entry(seq_id) != NULL);
593 EVK_mutex_unlock(); /*************************************/
600 evk_alloc_flag64ID(EV_ID *queueID)
604 EVK_mutex_lock(); /*************************************/
606 seq_id = get_seq_id();
607 seq_id |= (EV_ID_BIT | EV_FLAG64_BIT | EV_AUTO_ID_BIT);
608 } while(find_queue_entry(seq_id) != NULL);
609 EVK_mutex_unlock(); /*************************************/
616 evk_alloc_queueID(EV_ID *queueID)
620 EVK_mutex_lock(); /*************************************/
622 seq_id = get_seq_id();
623 seq_id |= (EV_ID_BIT | EV_QUEUE_BIT | EV_AUTO_ID_BIT);
624 } while(find_queue_entry(seq_id) != NULL);
625 EVK_mutex_unlock(); /*************************************/
632 evk_store_flag(EV_Flag *ev, int mem)
639 if (GET_USER_OR_KERNEL(flagID, ev->flagID)) /* pgr0039 */
641 if (GET_USER_OR_KERNEL(bits, ev->bits)) /* pgr0039 */
644 EVK_mutex_lock(); /*************************************/
646 queue = (struct flag *)find_queue_entry(flagID); /* pgr0000 */
648 EVK_info1("set_flag: No such ID %x\n", flagID);
653 if (queue->value == 0) {
654 queue->seq_num = sequence_number++;
656 queue->value |= bits; /* pgr0000 */
658 wake_up_interruptible(&(queue->wq_head));
661 EVK_mutex_unlock(); /***********************************/
666 evk_store_flag64(EV_Flag64 *ev, int mem)
668 struct flag64 *queue;
673 if (GET_USER_OR_KERNEL(flagID, ev->flagID)) /* pgr0039 */
675 //GET_USER_OR_KERNEL(bits, ev->bits); /* pgr0039 */
676 if (mem == evk_mem_user) {
677 if (copy_from_user(&bits, &(ev->bits), sizeof(bits)))
683 EVK_mutex_lock(); /*************************************/
685 queue = (struct flag64 *)find_queue_entry(flagID); /* pgr0000 */
687 EVK_info1("set_flag64: No such ID %x\n", flagID);
692 if (queue->value == 0) {
693 queue->seq_num = sequence_number++;
695 queue->value |= bits; /* pgr0000 */
697 wake_up_interruptible(&(queue->wq_head));
700 EVK_mutex_unlock(); /***********************************/
705 evk_store_message(EV_Message *ev, int mem)
707 struct message_queue *queue;
712 UINT32 length, senderInfo, seq;
714 if (GET_USER_OR_KERNEL(queueID, ev->queueID)) /* pgr0039 */
716 if (GET_USER_OR_KERNEL(length, ev->length)) /* pgr0039 */
718 if (GET_USER_OR_KERNEL(senderInfo, ev->senderInfo)) /* pgr0039 */
721 EVK_mutex_lock(); /*************************************/
723 queue = (struct message_queue *)find_queue_entry(queueID); /* pgr0000 */
725 EVK_info1("store_message: No such queueID %x\n", queueID);
730 if (length > queue->max_bytes) { /* pgr0000 */
731 EVK_info0("store_message: message is too long for the queue");
736 if (queue->num == queue->length) {
738 switch(queue->type) {
739 case EV_MESSAGE_QUEUE_TYPE_BUSY:
740 EVK_info1("store_message: queue %x BUSY\n", queueID);
745 case EV_MESSAGE_QUEUE_TYPE_FIFO:
747 queue->readptr %= queue->length;
751 case EV_MESSAGE_QUEUE_TYPE_REPLACE:
756 EVK_BUG("internal error in store_message\n");
763 writeptr = (queue->readptr + queue->num) % queue->length;
764 ptr = queue->message + writeptr * EVK_message_block_size(queue->max_bytes);
766 memcpy(ptr, &length, sizeof(length));
767 ptr += sizeof(length);
768 memcpy(ptr, &senderInfo, sizeof(senderInfo));
769 ptr += sizeof(senderInfo);
770 seq = sequence_number++;
771 memcpy(ptr, &seq, sizeof(seq));
774 if (queue->num == 0) {
775 queue->seq_num = seq;
779 if (mem == evk_mem_user) {
780 if (copy_from_user(ptr, ev->message, length)) {
785 memcpy(ptr, ev->message, length);
788 wake_up_interruptible(&(queue->wq_head));
791 EVK_mutex_unlock(); /***********************************/
797 evk_set_poll(struct file *filp, EV_ID queueID)
799 struct common_queue *queue;
802 EVK_mutex_lock(); /*************************************/
804 queue = find_queue_entry(queueID);
806 EVK_info1("set_poll: ID %x not found.\n", queueID);
811 filp->private_data = (void *)queueID;
814 EVK_mutex_unlock(); /*************************************/
819 evk_get_flag_event(EV_Event *ev, int peek_only, int wait, int mem)
821 struct flag *queue, *queue2;
826 if (GET_USER_OR_KERNEL(flagID, ev->u.flag.flagID)) /* pgr0039 */
831 queue = (struct flag *)find_queue_entry(flagID); /* pgr0000 */
833 EVK_info1("get_flag: No such flag %x\n", flagID);
838 if (queue->value != 0) {
841 if (GET_USER_OR_KERNEL(bits, ev->u.flag.bits)) { /* pgr0039 */
846 if (bits == 0 || ((bits & queue->value) != 0)) { /* pgr0000 */
848 if (PUT_USER_OR_KERNEL(EV_EVENT_Flag, ev->type)) { /* pgr0039 */
852 if (PUT_USER_OR_KERNEL(queue->value, ev->u.flag.bits)) { /* pgr0039 */
858 queue->read_pid = current->pid;
868 if (queue != NULL && wait != 0 && found == 0) {
870 EVK_mutex_unlock(); /*************************************/
873 = wait_event_interruptible(queue->wq_head,
874 ((queue2 = (struct flag *)find_queue_entry(flagID)) == NULL
875 || queue2->value != 0));
877 EVK_mutex_lock(); /*************************************/
880 EVK_info1("Interrupted while waiting for flag %x\n", flagID);
882 } else if (queue2 == NULL) { /* pgr0039 */
883 EVK_info1("flag %x was destroyed while waiting for it\n", flagID);
893 evk_get_flag64_event(EV_Event *ev, int peek_only, int wait, int mem)
895 struct flag64 *queue, *queue2;
900 if (GET_USER_OR_KERNEL(flagID, ev->u.flag64.flagID)) /* pgr0039 */
905 queue = (struct flag64 *)find_queue_entry(flagID); /* pgr0000 */
907 EVK_info1("get_flag64: No such flag %x\n", flagID);
912 if (queue->value != 0) {
915 //GET_USER_OR_KERNEL(bits, ev->u.flag64.bits); /* pgr0039 */
916 if (mem == evk_mem_user) {
917 if (copy_from_user(&bits, &(ev->u.flag64.bits), sizeof(bits))) {
922 bits = ev->u.flag64.bits;
925 if (bits == 0 || ((bits & queue->value) != 0)) { /* pgr0000 */
927 if (PUT_USER_OR_KERNEL(EV_EVENT_Flag64, ev->type)) { /* pgr0039 */
931 if (PUT_USER_OR_KERNEL(queue->value, ev->u.flag64.bits)) { /* pgr0039 */
937 queue->read_pid = current->pid;
947 if (queue != NULL && wait != 0 && found == 0) {
949 EVK_mutex_unlock(); /*************************************/
952 = wait_event_interruptible(queue->wq_head,
953 ((queue2 = (struct flag64 *)find_queue_entry(flagID)) == NULL
954 || queue2->value != 0));
956 EVK_mutex_lock(); /*************************************/
959 EVK_info1("Interrupted while waiting for flag %x\n", flagID);
961 } else if (queue2 == NULL) { /* pgr0039 */
962 EVK_info1("flag %x was destroyed while waiting for it\n", flagID);
971 static __inline__ void
972 remove_message_event(struct message_queue *queue, UINT8 removeptr)
981 offset = (int)removeptr - (int)(queue->readptr);
983 if (offset == 0) {/* To remove the head of the queue, advance the queue by one readptr only */
985 queue->readptr %= queue->length;
987 if (queue->num > 0) {
988 /* Reset the occurrence time of the first message in the queue to the occurrence time of the queue. */
989 ptr = (queue->message
990 + queue->readptr * EVK_message_block_size(queue->max_bytes));
991 ptr += sizeof(UINT32) * 2;
992 memcpy(&(queue->seq_num), ptr, sizeof(UINT32));
997 offset += queue->length;
999 if (offset == queue->num) {/* Do nothing to delete the end of the queue */
1003 /* To delete a message in the middle of the queue, pack the following messages. */
1005 size = EVK_message_block_size(queue->max_bytes);
1007 for(i = 0 ; i < queue->num - offset ; i++, to++) {
1008 to %= queue->length;
1009 from = (to + 1) % queue->length;
1010 pFrom = queue->message + from * size;
1011 pTo = queue->message + to * size;
1012 memcpy(pTo, pFrom, size);
1017 evk_get_message_event(EV_Event *ev, int peek_only, int wait, int mem)
1019 struct message_queue *queue, *queue2;
1027 if (GET_USER_OR_KERNEL(queueID, ev->u.message.queueID)) /* pgr0039 */
1031 queue = (struct message_queue *)find_queue_entry(queueID); /* pgr0000 */
1032 if (queue == NULL) {
1033 EVK_info1("get_message: No such queue %x\n", queueID);
1039 readptr = queue->readptr;
1040 for(i = 0 ; i < num ; i++, readptr = (readptr + 1) % queue->length) {
1041 UINT32 size, senderInfo, seq;
1042 UINT32 length, q_senderInfo;
1044 ptr = (queue->message
1045 + readptr * EVK_message_block_size(queue->max_bytes));
1047 memcpy(&size, ptr, sizeof(size));
1048 ptr += sizeof(size);
1049 memcpy(&senderInfo, ptr, sizeof(senderInfo));
1050 ptr += sizeof(senderInfo);
1051 memcpy(&seq, ptr, sizeof(seq));
1054 if (GET_USER_OR_KERNEL(length, ev->u.message.length)) { /* pgr0039 */
1058 if (GET_USER_OR_KERNEL(q_senderInfo, ev->u.message.senderInfo)) { /* pgr0039 */
1063 if (q_senderInfo == 0 && length == 0) { /* pgr0000 */
1065 } else if (q_senderInfo != 0 && q_senderInfo == senderInfo) {
1067 } else if (length > 0 && size >= length) { /* pgr0000 */
1069 if (mem == evk_mem_user) {
1071 compbytes = evk_malloc(length);
1072 if (compbytes != NULL) {
1073 if (copy_from_user(compbytes, &(ev->u.message.message), length)) {
1075 evk_free(compbytes);
1078 if (memcmp(ptr, compbytes, length) == 0) {
1081 evk_free(compbytes);
1084 if (memcmp(ptr, ev->u.message.message, length) == 0) {
1092 if (PUT_USER_OR_KERNEL(EV_EVENT_Message, ev->type)) { /* pgr0039 */
1096 if (PUT_USER_OR_KERNEL(size, ev->u.message.length)) { /* pgr0039 */
1100 if (PUT_USER_OR_KERNEL(senderInfo, ev->u.message.senderInfo)) { /* pgr0039 */
1104 if (mem == evk_mem_user) {
1105 if (copy_to_user(ev->u.message.message, ptr, size)) {
1110 memcpy(ev->u.message.message, ptr, size);
1113 queue->read_pid = current->pid;
1117 remove_message_event(queue, readptr);
1125 if (queue != NULL && wait != 0 && matched == 0) {
1127 EVK_mutex_unlock(); /*************************************/
1129 = wait_event_interruptible(queue->wq_head,
1130 ((queue2 = (struct message_queue *)find_queue_entry(queueID))==NULL
1131 || queue2->num > 0));
1133 EVK_mutex_lock(); /*************************************/
1135 if (wait_ret != 0) {
1136 EVK_info1("Interrupted while waiting for queue %x\n", queueID);
1138 } else if (queue2 == NULL) { /* pgr0039 */
1139 EVK_info1("queue %x was destroyed while waiting for it\n", queueID);
1150 evk_get_event(EV_Event *ev, int peek_only, int wait, int mem)
1155 if (GET_USER_OR_KERNEL(type, ev->type)) /* pgr0039 */
1157 if (PUT_USER_OR_KERNEL(EV_EVENT_None, ev->type)) /* pgr0039 */
1160 switch(type) { /* pgr0000 */
1162 EVK_mutex_lock(); /*************************************/
1163 ret = evk_get_flag_event(ev, peek_only, wait, mem);
1164 EVK_mutex_unlock(); /*************************************/
1167 case EV_EVENT_Flag64:
1168 EVK_mutex_lock(); /*************************************/
1169 ret = evk_get_flag64_event(ev, peek_only, wait, mem);
1170 EVK_mutex_unlock(); /*************************************/
1173 case EV_EVENT_Message:
1174 EVK_mutex_lock(); /*************************************/
1175 ret = evk_get_message_event(ev, peek_only, wait, mem);
1176 EVK_mutex_unlock(); /*************************************/
1186 evk_get_next_event(EVK_Next_Event_Query *query /* user */, int peek_only)
1189 int i, num, ret, first, found;
1190 struct common_queue *queue;
1191 UINT32 seq_oldest = 0;
1193 ids = (EV_ID *)kmalloc( (sizeof(EV_ID)*EV_MAX_IDS_IN_PROCESS), GFP_KERNEL );
1198 if (__get_user(num, &(query->num))) { /* pgr0039 */
1202 if (copy_from_user(&ids[0], query->ids, num * sizeof(EV_ID))) { /* pgr0039 */
1206 if (__put_user(EV_EVENT_None, &(query->ev.type))) { /* pgr0039 */
1215 EVK_mutex_lock(); /*************************************/
1217 for(i = 0 ; i < num /* pgr0039 */ ; i++) {
1218 queue = find_queue_entry(ids[i]);
1219 if (queue != NULL) {/* Have the specified queue ID */
1220 if ((EV_ID_IS_FLAG(ids[i])
1221 && ((struct flag *)queue)->value != 0)
1222 || (EV_ID_IS_FLAG64(ids[i])
1223 && ((struct flag64 *)queue)->value != 0)
1224 || (EV_ID_IS_QUEUE(ids[i])
1225 && ((struct message_queue *)queue)->num > 0)) {/*There are events.*/
1226 /* Compare with time_before macros for round 0 */
1227 if (first || time_before((unsigned long)queue->seq_num, /* pgr0006 */ /* pgr0039 */
1228 (unsigned long)seq_oldest)) {
1230 seq_oldest = queue->seq_num;
1238 if (EV_ID_IS_FLAG(ids[found])) {
1239 if (__put_user(ids[found], &(query->ev.u.flag.flagID))) { /* pgr0039 */
1243 ret = evk_get_flag_event(&(query->ev), peek_only, 0, evk_mem_user);
1244 } else if (EV_ID_IS_FLAG64(ids[found])) {
1245 if (__put_user(ids[found], &(query->ev.u.flag64.flagID))) { /* pgr0039 */
1249 ret = evk_get_flag64_event(&(query->ev), peek_only, 0, evk_mem_user);
1250 } else if (EV_ID_IS_QUEUE(ids[found])) {
1251 if (__put_user(ids[found], &(query->ev.u.message.queueID))) { /* pgr0039 */
1255 ret = evk_get_message_event(&(query->ev), peek_only, 0, evk_mem_user);
1260 EVK_mutex_unlock(); /*************************************/
1267 evk_ioctl(struct file *filp, unsigned int cmd,
1270 EVK_Message_Queue_Request mesq;
1271 int peek_only, wait;
1278 case EVK_IOC_CREATE_FLAG:
1279 queueID = (EV_ID)arg;
1280 ret = evk_create_flag(queueID);
1283 case EVK_IOC_CREATE_FLAG64:
1284 queueID = (EV_ID)arg;
1285 ret = evk_create_flag64(queueID);
1288 case EVK_IOC_CREATE_MESSAGE_QUEUE:
1289 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1290 if (access_ok(arg, sizeof(mesq))) { /* pgr0039 */
1292 if (access_ok(VERIFY_READ, arg, sizeof(mesq))) { /* pgr0039 */
1294 if (copy_from_user(&mesq, (EV_Flag *)arg, sizeof(mesq))) {
1299 ret = evk_create_message_queue(mesq.queueID, mesq.length,
1300 mesq.max_bytes, mesq.type);
1306 case EVK_IOC_ALLOC_FLAG_ID:
1307 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1308 if (access_ok(arg, sizeof(queueID))) { /* pgr0039 */
1310 if (access_ok(VERIFY_WRITE, arg, sizeof(queueID))) { /* pgr0039 */
1312 evk_alloc_flagID(&queueID);
1313 if (put_user(queueID, (EV_ID *)arg)) { /* pgr0039 */
1323 case EVK_IOC_ALLOC_FLAG64_ID:
1324 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1325 if (access_ok(arg, sizeof(queueID))) { /* pgr0039 */
1327 if (access_ok(VERIFY_WRITE, arg, sizeof(queueID))) { /* pgr0039 */
1329 evk_alloc_flag64ID(&queueID);
1330 if (put_user(queueID, (EV_ID *)arg)) { /* pgr0039 */
1340 case EVK_IOC_ALLOC_QUEUE_ID:
1341 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1342 if (access_ok(arg, sizeof(queueID))) { /* pgr0039 */
1344 if (access_ok(VERIFY_WRITE, arg, sizeof(queueID))) { /* pgr0039 */
1346 evk_alloc_queueID(&queueID);
1347 if (put_user(queueID, (EV_ID *)arg)) { /* pgr0039 */
1357 case EVK_IOC_DESTROY_QUEUE:
1358 queueID = (EV_ID)arg;
1359 ret = evk_destroy_queue(queueID);
1362 case EVK_IOC_STORE_FLAG:
1363 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1364 if (access_ok(arg, sizeof(EV_Flag))) { /* pgr0039 */
1366 if (access_ok(VERIFY_READ, arg, sizeof(EV_Flag))) { /* pgr0039 */
1368 ret = evk_store_flag((EV_Flag *)arg, evk_mem_user);
1374 case EVK_IOC_STORE_FLAG64:
1375 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1376 if (access_ok(arg, sizeof(EV_Flag64))) { /* pgr0039 */
1378 if (access_ok(VERIFY_READ, arg, sizeof(EV_Flag64))) { /* pgr0039 */
1380 ret = evk_store_flag64((EV_Flag64 *)arg, evk_mem_user);
1386 case EVK_IOC_STORE_MESSAGE:
1387 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1388 if (access_ok(arg, sizeof(EV_Message))) { /* pgr0039 */
1390 if (access_ok(VERIFY_READ, arg, sizeof(EV_Message))) { /* pgr0039 */
1392 ret = evk_store_message((EV_Message *)arg, evk_mem_user);
1398 case EVK_IOC_SET_POLL:
1399 queueID = (EV_ID)arg;
1400 ret = evk_set_poll(filp, queueID);
1403 case EVK_IOC_PEEK_EVENT:
1409 case EVK_IOC_WAIT_EVENT:
1415 case EVK_IOC_GET_EVENT:
1419 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1420 if (access_ok(arg, sizeof(EV_Event))) { /* pgr0039 */
1422 if (access_ok(VERIFY_WRITE, arg, sizeof(EV_Event))) { /* pgr0039 */
1424 ret = evk_get_event((EV_Event *)arg, peek_only, wait, evk_mem_user);
1430 case EVK_IOC_PEEK_NEXT_EVENT:
1435 case EVK_IOC_GET_NEXT_EVENT:
1438 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1439 if (access_ok(arg, sizeof(EVK_Next_Event_Query))) { /* pgr0039 */
1441 if (access_ok(VERIFY_WRITE, arg, sizeof(EVK_Next_Event_Query))) { /* pgr0039 */
1443 ret = evk_get_next_event((EVK_Next_Event_Query *)arg, peek_only);
1449 case EVK_IOC_DEBUG_LIST:
1465 evk_poll(struct file *filp, poll_table *wait)
1467 unsigned int ret = 0;
1468 struct common_queue *queue;
1470 EV_ID queueID = (EV_ID)(filp->private_data);
1471 // Returns errors without stopping at assert if queueID is invalid
1472 // (Troubleshooting for Continuous Printing)
1473 if (!EV_ID_IS_VALID(queueID)) {
1474 printk("evk_poll ERROR: invalid queueID=%x\n", queueID);
1475 return POLLERR|POLLHUP;
1477 //EVK_assert(EV_ID_IS_VALID(queueID), "poll: flag/queueID not set");
1479 EVK_mutex_lock();/*****************************************/
1481 queue = find_queue_entry(queueID);
1482 if (queue == NULL) {
1483 EVK_info1("poll: No such flag/queueID %x\n", queueID);
1484 ret = POLLERR|POLLHUP;
1488 poll_wait(filp, &(queue->wq_head), wait);
1490 if (EV_ID_IS_FLAG(queueID)) {
1491 if (((struct flag *)queue)->value != 0) {
1494 } else if (EV_ID_IS_FLAG64(queueID)) {
1495 if (((struct flag64 *)queue)->value != 0) {
1499 if (((struct message_queue *)queue)->num > 0) {
1505 EVK_mutex_unlock(); /***************************************/
1510 /** List of system call corresponding function registrations */
1511 static struct file_operations evk_fops = {
1513 .release = evk_close,
1514 .unlocked_ioctl = evk_ioctl,
1518 #ifdef ENABLE_PROC_FS
1520 evk_procFS_show(struct seq_file *m, int mode)
1523 struct list_head *list, *entries;
1524 struct common_queue *queue;
1526 seq_printf(m, "[ev library status ");
1530 seq_printf(m, "(flag)]\n");
1531 seq_printf(m, "PID moduleID flagID[hash] value\n");
1533 case evk_enum_flag64:
1534 seq_printf(m, "(flag64)]\n");
1535 seq_printf(m, "PID moduleID flagID[hash] value\n");
1537 case evk_enum_queue:
1538 seq_printf(m, "(queue)]\n");
1539 seq_printf(m, "PID moduleID queueID[hash] maxbytes remain type\n");
1545 num = evk_get_queue_entry(&entries);
1547 for (i = 0 ; i < num ; i++) {
1548 list = &(entries[i]);
1549 if (!list_empty(list)) {
1550 list_for_each_entry(queue, list, list) {
1551 if ((mode == evk_enum_flag && (!EV_ID_IS_FLAG(queue->queueID)))
1552 || (mode == evk_enum_flag64 && (!EV_ID_IS_FLAG64(queue->queueID)))
1553 || (mode == evk_enum_queue && (!EV_ID_IS_QUEUE(queue->queueID))))
1558 seq_printf(m, "%08d ", queue->pid);
1559 seq_printf(m, "%05d(%04x) ", ((queue->queueID & 0x00ffff00) >> 8), ((queue->queueID & 0x00ffff00) >> 8));
1560 seq_printf(m, "0x%08x[%2d] ", queue->queueID, calc_hash(queue->queueID));
1564 seq_printf(m, "0x%x", EVK_PFLAG(queue)->value);
1567 case evk_enum_flag64:
1568 seq_printf(m, "0x%llx", EVK_PFLAG64(queue)->value);
1571 case evk_enum_queue:
1572 seq_printf(m, "%04d %02d ", EVK_PQUEUE(queue)->max_bytes, EVK_PQUEUE(queue)->length);
1573 seq_printf(m, "%02d ", EVK_PQUEUE(queue)->num);
1574 seq_printf(m, "%d ", EVK_PQUEUE(queue)->type);
1577 seq_printf(m, "\n");
1587 evk_procFS_flag_show(struct seq_file *m, void *v)
1589 return evk_procFS_show(m, evk_enum_flag);
1593 evk_procFS_flag64_show(struct seq_file *m, void *v)
1595 return evk_procFS_show(m, evk_enum_flag64);
1599 evk_procFS_queue_show(struct seq_file *m, void *v)
1601 return evk_procFS_show(m, evk_enum_queue);
1605 evk_procFS_flag_open(struct inode *inode, struct file *file)
1607 return single_open(file, evk_procFS_flag_show, NULL);
1611 evk_procFS_flag64_open(struct inode *inode, struct file *file)
1613 return single_open(file, evk_procFS_flag64_show, NULL);
/* open() handler for the queue /proc entry (single_open pattern). */
1617 evk_procFS_queue_open(struct inode *inode, struct file *file)
1619 return single_open(file, evk_procFS_queue_show, NULL);
/* file_operations for /proc/driver/ev_flag.
 * NOTE(review): .read (presumably seq_read, standard for single_open)
 * sits on an elided line; the table could be const on modern kernels. */
1622 static struct file_operations evk_proc_flag_fops = {
1623 .owner = THIS_MODULE,
1624 .open = evk_procFS_flag_open,
1626 .llseek = seq_lseek,
1627 .release = single_release,
/* file_operations for /proc/driver/ev_flag64 (same single_open shape
 * as the flag entry). */
1630 static struct file_operations evk_proc_flag64_fops = {
1631 .owner = THIS_MODULE,
1632 .open = evk_procFS_flag64_open,
1634 .llseek = seq_lseek,
1635 .release = single_release,
/* file_operations for /proc/driver/ev_queue (same single_open shape
 * as the flag entries). */
1638 static struct file_operations evk_proc_queue_fops = {
1639 .owner = THIS_MODULE,
1640 .open = evk_procFS_queue_open,
1642 .llseek = seq_lseek,
1643 .release = single_release,
1645 #endif /*ENABLE_PROC_FS*/
/* Interior of EVK_init (module initialization): reserves the char-device
 * region, registers the cdev, seeds the static flag/flag64/message-queue
 * free pools and hash buckets, then creates /proc entries and the
 * /dev/agl node. (Signature, error-branch braces and returns fall on
 * elided lines of this extract.) */
1651 #ifdef CONFIG_PROC_FS
1652 #ifdef ENABLE_PROC_FS
1653 struct proc_dir_entry *ret;
1654 #endif /* ENABLE_PROC_FS */
1655 #endif /* CONFIG_PROC_FS */
/* Reserve nrdevs device numbers starting at devmajor:devminor. */
1659 dev = MKDEV(devmajor, devminor);
1660 err = register_chrdev_region(dev, nrdevs, EVK_NAME);
1662 EVK_info1("register_chrdev_region error %d\n", -err);
/* Register the character device with its fops table. */
1666 cdev_init(&cdev, &evk_fops);
1667 cdev.owner = THIS_MODULE;
1668 cdev.ops = &evk_fops;
1670 err = cdev_add(&cdev, dev, 1);
1672 EVK_info1("cdev_add error %d\n", -err);
1676 /* Initialization */
/* Thread every statically allocated descriptor onto its free pool. */
1677 for(i = 0 ; i < EVK_MAX_FLAGS ; i++) {
1678 list_add_tail((struct list_head *)&(_flag_pool[i]),
1681 for(i = 0 ; i < EVK_MAX_FLAG64S ; i++) {
1682 list_add_tail((struct list_head *)&(_flag64_pool[i]),
1685 for(i = 0 ; i < EVK_MAX_MESSAGE_QUEUES ; i++) {
1686 list_add_tail((struct list_head *)&(_message_queue_pool[i]),
1687 &message_queue_pool);
/* Empty hash buckets for the active-queue lookup table. */
1689 for(i = 0 ; i < HASH_KEY ; i++) {
1690 INIT_LIST_HEAD(&(queue_entry[i]));
1693 #ifdef CONFIG_PROC_FS
1694 #ifdef ENABLE_PROC_FS
/* Diagnostic /proc entries.
 * NOTE(review): on proc_create() failure the log prints -err, but err
 * is never assigned from proc_create here -- the message likely reports
 * a stale value from cdev_add above. */
1695 ret = proc_create("driver/ev_flag", 0, NULL, &evk_proc_flag_fops);
1697 EVK_info1("Unable to initialize /proc entry %d\n", -err);
1701 ret = proc_create("driver/ev_flag64", 0, NULL, &evk_proc_flag64_fops);
1703 EVK_info1("Unable to initialize /proc entry %d\n", -err);
1707 ret = proc_create("driver/ev_queue", 0, NULL, &evk_proc_queue_fops);
1709 EVK_info1("Unable to initialize /proc entry %d\n", -err);
1712 #endif /* ENABLE_PROC_FS */
1713 #endif /* CONFIG_PROC_FS */
/* Create the device class and the /dev/agl/<EVK_NAME> node.
 * NOTE(review): return values of class_create/device_create are not
 * checked on the visible lines -- confirm against the elided ones. */
1715 pClass = class_create(THIS_MODULE, EVK_NAME);
1716 device_create(pClass, NULL, dev, NULL, "agl/"EVK_NAME);
/* Interior of EVK_exit (module teardown): mirrors EVK_init in reverse --
 * destroys the device node and class, releases the reserved device
 * numbers and removes the /proc entries.
 * NOTE(review): remove_proc_entry()'s second argument is the parent
 * proc_dir_entry pointer; 0 acts as NULL (top-level /proc), but NULL
 * would be the clearer spelling. */
1722 //#ifndef CONFIG_COMBINE_MODULES
1727 dev_t dev = MKDEV(devmajor, devminor);
1728 device_destroy(pClass, dev);
1729 class_destroy(pClass);
1733 unregister_chrdev_region(dev, nrdevs);
1735 remove_proc_entry( "driver/ev_flag", 0 );
1736 remove_proc_entry( "driver/ev_flag64", 0 );
1737 remove_proc_entry( "driver/ev_queue", 0 );
/* Create a 32-bit flag and translate evk_create_flag()'s errno-style
 * result to EV_ERR: -EEXIST -> EV_ERR_Exist, any other negative ->
 * EV_ERR_Fatal (the success return falls on elided lines). */
1744 EVK_create_flag_in(EV_ID flagID)
1746 int ret = evk_create_flag(flagID);
1748 if (ret == -EEXIST) {
1749 return EV_ERR_Exist;
1750 } else if (ret < 0) {
1751 return EV_ERR_Fatal;
/* 64-bit counterpart of EVK_create_flag_in: same errno -> EV_ERR
 * mapping over evk_create_flag64(). */
1758 EVK_create_flag64_in(EV_ID flagID)
1760 int ret = evk_create_flag64(flagID);
1762 if (ret == -EEXIST) {
1763 return EV_ERR_Exist;
1764 } else if (ret < 0) {
1765 return EV_ERR_Fatal;
1771 /** @see EV_create_flag */
/* Public entry: rejects IDs that are not flag-class or that carry the
 * auto-ID bit (those must go through EVK_create_flag_auto_id), then
 * delegates to the _in worker. */
1773 EVK_create_flag(EV_ID flagID)
1775 if (!EV_ID_IS_FLAG(flagID) || EV_ID_IS_AUTO_ID(flagID)) {
1776 return EV_ERR_Invalid_ID;
1778 return EVK_create_flag_in(flagID);
1781 /** @see EV_create_flag64 */
/* Public entry for 64-bit flags: same ID-class / auto-ID validation as
 * EVK_create_flag, then delegate. */
1783 EVK_create_flag64(EV_ID flagID)
1785 if (!EV_ID_IS_FLAG64(flagID) || EV_ID_IS_AUTO_ID(flagID)) {
1786 return EV_ERR_Invalid_ID;
1788 return EVK_create_flag64_in(flagID);
/* Create a message queue (capacity = length messages of up to max_bytes
 * each, with the given overflow/ordering type) and translate the
 * errno-style result to EV_ERR, as in EVK_create_flag_in. */
1792 EVK_create_queue_in(EV_ID queueID, UINT8 length, UINT16 max_bytes,
1793 EV_Message_Queue_Type type)
1795 int ret = evk_create_message_queue(queueID, length, max_bytes, type);
1797 if (ret == -EEXIST) {
1798 return EV_ERR_Exist;
1799 } else if (ret < 0) {
1800 return EV_ERR_Fatal;
1806 /** @see EV_create_queue */
/* Public entry: rejects non-queue-class and auto-ID IDs, then
 * delegates to the _in worker. */
1808 EVK_create_queue(EV_ID queueID, UINT8 length, UINT16 max_bytes,
1809 EV_Message_Queue_Type type)
1811 if (!EV_ID_IS_QUEUE(queueID) || EV_ID_IS_AUTO_ID(queueID)) {
1812 return EV_ERR_Invalid_ID;
1814 return EVK_create_queue_in(queueID, length, max_bytes, type);
1817 /** @see EV_create_flag_auto_id */
/* Allocate a fresh flag ID into *flagID, then create the flag with it.
 * (Handling of err after the create call falls on elided lines.) */
1819 EVK_create_flag_auto_id(/* OUT */EV_ID *flagID)
1822 EVK_assert(flagID != NULL, "NULL pointer was specified");
1824 if (evk_alloc_flagID(flagID) < 0) {
1825 return EV_ERR_Fatal;
1828 err = EVK_create_flag_in(*flagID);
1835 /** @see EV_create_flag64_auto_id */
/* 64-bit counterpart of EVK_create_flag_auto_id. */
1837 EVK_create_flag64_auto_id(/* OUT */EV_ID *flagID)
1840 EVK_assert(flagID != NULL, "NULL pointer was specified");
1842 if (evk_alloc_flag64ID(flagID) < 0) {
1843 return EV_ERR_Fatal;
1846 err = EVK_create_flag64_in(*flagID);
1853 /** @see EV_create_queue_auto_id */
/* Allocate a fresh queue ID into *queueID, then create the queue; on a
 * create failure *queueID is reset to EV_NO_ID so callers never see a
 * dangling ID. */
1855 EVK_create_queue_auto_id(/* OUT */EV_ID *queueID, UINT8 length,
1856 UINT16 max_bytes, EV_Message_Queue_Type type)
1859 EVK_assert(queueID != NULL, "NULL pointer was specified");
1861 if (evk_alloc_queueID(queueID) < 0) {
1862 return EV_ERR_Fatal;
1865 err = EVK_create_queue_in(*queueID, length, max_bytes, type);
1867 *queueID = EV_NO_ID;
1872 /** @see EV_destroy_flag */
/* Destroy a flag. Flags and queues share one descriptor table, so this
 * routes through evk_destroy_queue(); -ENOENT means the ID was never
 * created (EV_ERR_Invalid_ID), other negatives are fatal. */
1874 EVK_destroy_flag(EV_ID flagID)
1877 err = evk_destroy_queue(flagID);
1879 if (err == -ENOENT) {
1880 return EV_ERR_Invalid_ID;
1881 } else if (err < 0) {
1882 return EV_ERR_Fatal;
1888 /** @see EV_destroy_queue */
/* Queues use the same destroy path as flags; pure alias. */
1890 EVK_destroy_queue(EV_ID queueID)
1892 return EVK_destroy_flag(queueID);
1895 /* Sending the event */
1896 /** @see EV_set_flag */
/* OR `bits` into the 32-bit flag identified by flagID (the local
 * EV_Flag declaration and its bits assignment fall on elided lines),
 * storing from kernel memory space. */
1898 EVK_set_flag(EV_ID flagID, UINT32 bits)
1903 if (!EV_ID_IS_FLAG(flagID)) {
1904 return EV_ERR_Invalid_ID;
1906 flag.flagID = flagID;
/* errno -> EV_ERR: -ENOENT = no such flag, other negatives fatal. */
1909 ret = evk_store_flag(&flag, evk_mem_kernel);
1910 if (ret == -ENOENT) {
1911 return EV_ERR_Invalid_ID;
1912 } else if (ret < 0) {
1913 return EV_ERR_Fatal;
1919 /** @see EV_set_flag64 */
/* 64-bit counterpart of EVK_set_flag, via evk_store_flag64(). */
1921 EVK_set_flag64(EV_ID flagID, UINT64 bits)
1926 if (!EV_ID_IS_FLAG64(flagID)) {
1927 return EV_ERR_Invalid_ID;
1929 flag.flagID = flagID;
1932 ret = evk_store_flag64(&flag, evk_mem_kernel);
1933 if (ret == -ENOENT) {
1934 return EV_ERR_Invalid_ID;
1935 } else if (ret < 0) {
1936 return EV_ERR_Fatal;
1942 /** @see EV_send_message */
/* Enqueue a message from kernel space. EV_Message is heap-allocated
 * (presumably too large for the kernel stack -- confirm against
 * sizeof(EV_Message)); allocation failure maps to EV_ERR_Fatal.
 * NOTE(review): the asserts below run after the allocation; if
 * EVK_assert can fire here, msg would leak -- verify the cleanup path
 * on the elided lines frees msg on every exit. */
1944 EVK_send_message(EV_ID queueID, UINT16 bytes, const void *message,
1947 EV_Message *msg = NULL;
1951 msg = evk_malloc( sizeof( EV_Message ) );
1954 ev_ret = EV_ERR_Fatal;
1958 if (!EV_ID_IS_QUEUE(queueID)) {
1959 ev_ret = EV_ERR_Invalid_ID;
1962 EVK_assert(message != NULL, "NULL pointer was specified");
1963 EVK_assert(bytes <= EV_MAX_MESSAGE_LENGTH, "send_message: message too long");
/* Build the message and copy the caller's payload in. */
1965 msg->queueID = queueID;
1966 msg->senderInfo = senderInfo;
1967 msg->length = bytes;
1968 memcpy(msg->message, message, bytes);
/* errno -> EV_ERR: -ENOENT = no such queue, -EBUSY = queue full. */
1970 ret = evk_store_message(msg, evk_mem_kernel);
1971 if (ret == -ENOENT) {
1972 ev_ret = EV_ERR_Invalid_ID;
1973 } else if (ret == -EBUSY) {
1974 ev_ret = EV_ERR_Busy;
1975 } else if (ret < 0) {
1976 ev_ret = EV_ERR_Fatal;
1989 /* Event acquisition(Order of arrival time) */
1990 //EV_ERR EV_get_next_event(/* OUT */EV_Event *ev);
/* Common worker for get/wait/peek on 32-bit flags.
 * peek_only != 0 reads without consuming; wait != 0 blocks until the
 * flag is set (interruptible: -EINTR -> EV_ERR_Interrupted).
 * flag->flagID is pre-set to EV_NO_ID so a failed call leaves a
 * recognizable "no event" result in the OUT parameter. */
1993 EVK_get_flag_in(EV_ID flagID, EV_Flag *flag, int peek_only, int wait)
1995 EV_Event *ev = NULL;
/* Heap-allocate the request/response event envelope. */
1999 ev = evk_malloc( sizeof( EV_Event ) );
2002 ev_ret = EV_ERR_Fatal;
2006 EVK_assert(flag != NULL, "get_flag: NULL pointer was specified");
2007 flag->flagID = EV_NO_ID;
2010 if (!EV_ID_IS_FLAG(flagID)) {
2011 ev_ret = EV_ERR_Invalid_ID;
/* Describe what we want: this flag, any bits. */
2015 ev->type = EV_EVENT_Flag;
2016 ev->u.flag.flagID = flagID;
2017 ev->u.flag.bits = 0;
2019 ret = evk_get_event(ev, peek_only, wait, evk_mem_kernel);
2022 if (ret == -ENOENT) {
2023 ev_ret = EV_ERR_Invalid_ID;
2024 } else if (ret == -EINTR) {
2025 ev_ret = EV_ERR_Interrupted;
2027 ev_ret = EV_ERR_Fatal;
/* Only copy out if the returned event really is a flag event. */
2032 if (ev->type == EV_EVENT_Flag) {
2033 flag->flagID = ev->u.flag.flagID;
2034 flag->bits = ev->u.flag.bits;
/* 64-bit counterpart of EVK_get_flag_in; identical flow with the
 * EV_EVENT_Flag64 envelope and u.flag64 payload. */
2047 EVK_get_flag64_in(EV_ID flagID, EV_Flag64 *flag, int peek_only, int wait)
2049 EV_Event *ev = NULL;
2053 ev = evk_malloc( sizeof( EV_Event ) );
2056 ev_ret = EV_ERR_Fatal;
2060 EVK_assert(flag != NULL, "get_flag64: NULL pointer was specified");
2061 flag->flagID = EV_NO_ID;
2064 if (!EV_ID_IS_FLAG64(flagID)) {
2065 ev_ret = EV_ERR_Invalid_ID;
2069 ev->type = EV_EVENT_Flag64;
2070 ev->u.flag64.flagID = flagID;
2071 ev->u.flag64.bits = 0;
2073 ret = evk_get_event(ev, peek_only, wait, evk_mem_kernel);
2076 if (ret == -ENOENT) {
2077 ev_ret = EV_ERR_Invalid_ID;
2078 } else if (ret == -EINTR) {
2079 ev_ret = EV_ERR_Interrupted;
2081 ev_ret = EV_ERR_Fatal;
2086 if (ev->type == EV_EVENT_Flag64) {
2087 flag->flagID = ev->u.flag64.flagID;
2088 flag->bits = ev->u.flag64.bits;
2100 /* Event acquisition(With Search Criteria) */
2101 /** @see EV_get_flag */
/* Non-blocking, consuming fetch: peek_only=0, wait=0. */
2103 EVK_get_flag(EV_ID flagID, /* OUT */EV_Flag *flag)
2105 return EVK_get_flag_in(flagID, flag, 0, 0);
2108 /** @see EV_get_flag64 */
/* Non-blocking, consuming fetch (64-bit): peek_only=0, wait=0. */
2110 EVK_get_flag64(EV_ID flagID, /* OUT */EV_Flag64 *flag)
2112 return EVK_get_flag64_in(flagID, flag, 0, 0);
2115 /** @see EV_wait_flag */
/* Blocking, consuming fetch: wait=1. */
2117 EVK_wait_flag(EV_ID flagID, /* OUT */EV_Flag *flag)/* block */
2119 return EVK_get_flag_in(flagID, flag, 0, 1);
2122 /** @see EV_wait_flag64 */
/* Blocking, consuming fetch (64-bit): wait=1. */
2124 EVK_wait_flag64(EV_ID flagID, /* OUT */EV_Flag64 *flag)/* block */
2126 return EVK_get_flag64_in(flagID, flag, 0, 1);
2129 /** @see EV_peek_flag */
/* Non-destructive read: peek_only=1 leaves the flag value in place. */
2131 EVK_peek_flag(EV_ID flagID, /* OUT */EV_Flag *flag)
2133 return EVK_get_flag_in(flagID, flag, 1, 0);
2136 /** @see EV_peek_flag64 */
/* Non-destructive read (64-bit): peek_only=1. */
2138 EVK_peek_flag64(EV_ID flagID, /* OUT */EV_Flag64 *flag)
2140 return EVK_get_flag64_in(flagID, flag, 1, 0);
/* Common worker for get/wait/peek/find on message queues.
 * senderInfo and compare_bytes/length act as search criteria passed
 * down to evk_get_event inside the request envelope (compare_bytes ==
 * NULL means "no content filter"); peek_only and wait behave as in
 * EVK_get_flag_in. message-> fields are pre-cleared so a failed call
 * leaves a recognizable empty result. */
2144 EVK_get_message_in(EV_ID queueID, EV_Message *message, UINT32 senderInfo,
2145 UINT32 length, const void *compare_bytes,
2146 int peek_only, int wait)
2148 EV_Event *ev = NULL;
2152 ev = evk_malloc( sizeof( EV_Event ) );
2155 ev_ret = EV_ERR_Fatal;
2159 EVK_assert(message != NULL, "get_message: NULL pointer was specified");
2160 if (!EV_ID_IS_QUEUE(queueID)) {
2161 ev_ret = EV_ERR_Invalid_ID;
2164 message->queueID = EV_NO_ID;
2165 message->senderInfo = 0;
2166 message->length = 0;
/* Build the request: queue, sender filter, optional content filter. */
2168 ev->type = EV_EVENT_Message;
2169 ev->u.message.queueID = queueID;
2170 ev->u.message.senderInfo = senderInfo;
2171 if (compare_bytes != NULL) {
2172 ev->u.message.length = length;
2173 memcpy(ev->u.message.message, compare_bytes, length);
2175 ev->u.message.length = 0;
2178 ret = evk_get_event(ev, peek_only, wait, evk_mem_kernel);
2181 if (ret == -ENOENT) {
2182 ev_ret = EV_ERR_Invalid_ID;
2183 } else if (ret == -EINTR) {
2184 ev_ret = EV_ERR_Interrupted;
2186 ev_ret = EV_ERR_Fatal;
/* Copy out only if a message event actually came back. */
2191 if (ev->type == EV_EVENT_Message) {
2192 message->queueID = ev->u.message.queueID;
2193 message->senderInfo = ev->u.message.senderInfo;
2194 message->length = ev->u.message.length;
2195 memcpy(message->message, ev->u.message.message, ev->u.message.length);
2207 /** @see EV_get_message */
/* Non-blocking, consuming fetch with no sender/content filter. */
2209 EVK_get_message(EV_ID queueID, /* OUT */EV_Message *message)
2211 return EVK_get_message_in(queueID, message, 0, 0, NULL, 0, 0);
2214 /** @see EV_wait_message */
/* Blocking, consuming fetch with no filter (wait=1). */
2216 EVK_wait_message(EV_ID queueID, /* OUT */EV_Message *message)/* block */
2218 return EVK_get_message_in(queueID, message, 0, 0, NULL, 0, 1);
2221 /** @see EV_peek_message */
/* Non-destructive read of the next message (peek_only=1). */
2223 EVK_peek_message(EV_ID queueID, /* OUT */EV_Message *message)
2225 return EVK_get_message_in(queueID, message, 0, 0, NULL, 1, 0);
2228 /** @see EV_find_message_by_sender */
/* Not implemented in the kernel-space library: unconditionally returns
 * EV_ERR_Fatal regardless of arguments. */
2230 EVK_find_message_by_sender(EV_ID queueID, UINT32 senderInfo,
2231 /* OUT */EV_Message *message)
2234 return EV_ERR_Fatal;
2237 /** @see EV_find_message_by_content */
/* Not implemented in the kernel-space library: unconditionally returns
 * EV_ERR_Fatal regardless of arguments. */
2239 EVK_find_message_by_content(EV_ID queueID, UINT16 length,
2240 const void *compare_bytes,
2241 /* OUT */EV_Message *message)
2244 return EV_ERR_Fatal;
/* Symbols exported to other kernel modules (the public EVK_* API plus
 * the queue-table accessor). Note the EVK_find_message_* stubs are
 * intentionally not exported. */
2248 EXPORT_SYMBOL(evk_get_queue_entry);
2249 EXPORT_SYMBOL(EVK_init);
2250 EXPORT_SYMBOL(EVK_exit);
2252 EXPORT_SYMBOL(EVK_create_flag);
2253 EXPORT_SYMBOL(EVK_create_flag64);
2254 EXPORT_SYMBOL(EVK_create_queue);
2255 EXPORT_SYMBOL(EVK_create_flag_auto_id);
2256 EXPORT_SYMBOL(EVK_create_flag64_auto_id);
2257 EXPORT_SYMBOL(EVK_create_queue_auto_id);
2258 EXPORT_SYMBOL(EVK_destroy_flag);
2259 EXPORT_SYMBOL(EVK_destroy_queue);
2260 EXPORT_SYMBOL(EVK_set_flag);
2261 EXPORT_SYMBOL(EVK_set_flag64);
2262 EXPORT_SYMBOL(EVK_send_message);
2263 EXPORT_SYMBOL(EVK_get_flag);
2264 EXPORT_SYMBOL(EVK_wait_flag);
2265 EXPORT_SYMBOL(EVK_peek_flag);
2266 EXPORT_SYMBOL(EVK_get_flag64);
2267 EXPORT_SYMBOL(EVK_wait_flag64);
2268 EXPORT_SYMBOL(EVK_peek_flag64);
2269 EXPORT_SYMBOL(EVK_get_message);
2270 EXPORT_SYMBOL(EVK_wait_message);
2271 EXPORT_SYMBOL(EVK_peek_message);
/* Standalone-module registration: when this library is built into a
 * combined module, CONFIG_COMBINE_MODULES suppresses these so the
 * combined module supplies its own init/exit and license. */
2273 #ifndef CONFIG_COMBINE_MODULES
2274 //MODULE_LICENSE("proprietary");
2275 MODULE_LICENSE("GPL");
2276 MODULE_DESCRIPTION("EVent library for Kernel");
2277 //MODULE_SUPPORTED_DEVICE(name);
2278 //MODULE_PARM(var,type)
2279 //MODULE_PARM_DESC(var,desc)
2280 module_init(EVK_init);
2281 module_exit(EVK_exit);
2282 #endif /* !CONFIG_COMBINE_MODULES */