2 * drivers/agl/evk_lib.c
4 * Event library (kernel space part)
6 * @copyright Copyright (c) 2016-2020 TOYOTA MOTOR CORPORATION.
8 * This file is free software; you can redistribute it and/or modify
9 * it under the terms of version 2 of the GNU General Public License
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
22 #include <linux/module.h>
23 #include <linux/init.h>
24 #include <linux/version.h>
25 #include <linux/kernel.h>
26 #include <linux/slab.h>
27 #include <linux/sched.h>
28 #include <linux/wait.h>
29 #include <linux/poll.h>
30 #include <linux/list.h>
31 #include <asm/uaccess.h>
32 #include <linux/errno.h>
33 #include <linux/vmalloc.h>
35 #include <linux/proc_fs.h>
36 #include <linux/seq_file.h>
38 #include <linux/device.h>
39 #include <linux/cdev.h>
41 #include <linux/types.h>
42 #include <linux/ioctl.h>
44 #ifndef STANDARD_INT_DEFINITIONS
45 #define STANDARD_INT_DEFINITIONS
54 #endif /* !STANDARD_INT_DEFINITIONS */
56 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)
60 #define ENABLE_PROC_FS 1
62 #define EVK_NAME "evk"
63 #define EVK_DEV_NAME "/dev/agl/"EVK_NAME
64 #define EVK_DEV_MAJOR (1033 % 256)
65 #define EVK_IOC_MAGIC 0xE7
67 #define EVK_IOC_CREATE_FLAG _IO(EVK_IOC_MAGIC, 0x00)
68 #define EVK_IOC_CREATE_FLAG64 _IO(EVK_IOC_MAGIC, 0x01)
69 #define EVK_IOC_CREATE_MESSAGE_QUEUE _IO(EVK_IOC_MAGIC, 0x02)
70 #define EVK_IOC_ALLOC_FLAG_ID _IO(EVK_IOC_MAGIC, 0x03)
71 #define EVK_IOC_ALLOC_FLAG64_ID _IO(EVK_IOC_MAGIC, 0x04)
72 #define EVK_IOC_ALLOC_QUEUE_ID _IO(EVK_IOC_MAGIC, 0x05)
73 #define EVK_IOC_DESTROY_QUEUE _IO(EVK_IOC_MAGIC, 0x06)
74 #define EVK_IOC_STORE_FLAG _IO(EVK_IOC_MAGIC, 0x07)
75 #define EVK_IOC_STORE_FLAG64 _IO(EVK_IOC_MAGIC, 0x08)
76 #define EVK_IOC_STORE_MESSAGE _IO(EVK_IOC_MAGIC, 0x09)
77 #define EVK_IOC_SET_POLL _IO(EVK_IOC_MAGIC, 0x0a)
78 #define EVK_IOC_GET_EVENT _IO(EVK_IOC_MAGIC, 0x0b)
79 #define EVK_IOC_PEEK_EVENT _IO(EVK_IOC_MAGIC, 0x0c)
80 #define EVK_IOC_WAIT_EVENT _IO(EVK_IOC_MAGIC, 0x0d)
81 #define EVK_IOC_GET_NEXT_EVENT _IO(EVK_IOC_MAGIC, 0x0e)
82 #define EVK_IOC_PEEK_NEXT_EVENT _IO(EVK_IOC_MAGIC, 0x0f)
83 #define EVK_IOC_DEBUG_LIST _IO(EVK_IOC_MAGIC, 0x10)
85 /** @brief Flag ID and queue ID types used when sending and receiving events
87 * Assign a 32-bit value as follows
89 * - Most significant 8 bits: Reserved
 * - Next 16 bits: Module ID
91 * - Lower 8 bits: Define in module
93 * Where module is the modules that creates the queue.
94 * The modules define ID according to the above assignments using EV_Flag_ID_Base and EV_Queue_ID_Base macros.
95 * @see EV_Flag_ID_Base
96 * @see EV_Queue_ID_Base
#define EV_ID_BIT 0x80000000UL       /**< Set on every valid EV_ID */
#define EV_FLAG64_BIT 0x40000000UL   /**< ID designates a 64-bit flag */
#define EV_FLAG_BIT 0x20000000UL     /**< ID designates a 32-bit flag */
#define EV_QUEUE_BIT 0x10000000UL    /**< ID designates a message queue */
#define EV_AUTO_ID_BIT 0x08000000UL  /**< ID was allocated automatically */
#define EV_RESERVED_BIT 0xff000000UL /**< Mask covering all reserved (type) bits */
#define EV_INVALID_ID EV_ID_BIT      /**< Valid bit with no type bit: invalid */
#define EV_NO_ID EV_INVALID_ID       /**< Alias for "no ID" */

/* True when queueID carries both the valid bit and the 32-bit-flag type bit. */
#define EV_ID_IS_FLAG(queueID) \
(((queueID) & (EV_ID_BIT|EV_FLAG_BIT)) == (EV_ID_BIT|EV_FLAG_BIT))
/* True when queueID carries both the valid bit and the 64-bit-flag type bit. */
#define EV_ID_IS_FLAG64(queueID) \
(((queueID) & (EV_ID_BIT|EV_FLAG64_BIT)) == (EV_ID_BIT|EV_FLAG64_BIT))
/* True when queueID carries both the valid bit and the message-queue type bit. */
#define EV_ID_IS_QUEUE(queueID) \
(((queueID) & (EV_ID_BIT|EV_QUEUE_BIT)) == (EV_ID_BIT|EV_QUEUE_BIT))
/* True when the ID was auto-allocated (evk_alloc_*ID paths set this bit). */
#define EV_ID_IS_AUTO_ID(queueID) \
(((queueID) & (EV_ID_BIT|EV_AUTO_ID_BIT)) == (EV_ID_BIT|EV_AUTO_ID_BIT))
/* An ID is valid when it names exactly one of: flag, 64-bit flag, or queue. */
#define EV_ID_IS_VALID(queueID) \
(EV_ID_IS_FLAG(queueID) || EV_ID_IS_FLAG64(queueID) || EV_ID_IS_QUEUE(queueID))
120 /** @brief Macros for defining flag ID
122 * Define the module ID as an argument as follows.
123 * - #define XXX_Module_ID 1
124 * - #define XXX_Flag_ID_Base EV_Flag_ID_Base(XXX_Module_ID)
126 * - #define XXX_Flag_foo (XXX_Flag_ID_Base + 1)
127 * - #define XXX_Flag_bar (XXX_Flag_ID_Base + 2)
129 * The module ID is 16 bits and 0 to 65535 can be specified.
130 * In addition, 0 to 255 added to Base can be defined as ID.
132 #define EV_Flag_ID_Base(mod) (EV_ID_BIT|EV_FLAG_BIT|((mod)<<8))
134 /** @brief Macros for defining 64 bits flag ID
136 * Define the module ID as an argument as follows.
137 * - #define XXX_Module_ID 1
138 * - #define XXX_Flag64_ID_Base EV_Flag64_ID_Base(XXX_Module_ID)
140 * - #define XXX_Flag64_foo (XXX_Flag64_ID_Base + 1)
141 * - #define XXX_Flag64_bar (XXX_Flag64_ID_Base + 2)
143 * The module ID is 16 bits and 0 to 65535 can be specified.
144 * In addition, 0 to 255 added to Base can be defined as ID.
146 #define EV_Flag64_ID_Base(mod) (EV_ID_BIT|EV_FLAG64_BIT|((mod)<<8))
/** @brief Macros for defining message queue ID
150 * Define the module ID as an argument as follows.
151 * - #define XXX_Module_ID 1
152 * - #define XXX_Queue_ID_Base EV_Queue_ID_Base(XXX_Module_ID)
154 * - #define XXX_Queue_foo (XXX_Queue_ID_Base + 1)
155 * - #define XXX_Queue_bar (XXX_Queue_ID_Base + 2)
157 * The module ID is 16 bits and 0 to 65535 can be specified.
158 * In addition, 0 to 255 added to Base can be defined as ID.
160 #define EV_Queue_ID_Base(mod) (EV_ID_BIT|EV_QUEUE_BIT|((mod)<<8))
/** @brief Maximum number of bytes for a message event */
#define EV_MAX_MESSAGE_LENGTH 2048

/** @brief Maximum number of flag/queue IDs that can be created within a thread */
#define EV_MAX_IDS_IN_THREAD 24

/** @brief Maximum number of threads that can be registered to the EV in a process */
#define EV_MAX_THREADS_IN_PROCESS 16

/** @brief Maximum number of flag/queue IDs that can be created within a process */
#define EV_MAX_IDS_IN_PROCESS \
(EV_MAX_IDS_IN_THREAD * EV_MAX_THREADS_IN_PROCESS)
/** @brief Return values for event library functions */
EV_OK = 0, /**< Normal completion */
EV_ERR_Exist, /**< The specified flag/message queue already exists */
EV_ERR_Invalid_ID, /**< The specified flag/message queue does not exist */
EV_ERR_Busy, /**< Message queue full; failed to send */
EV_ERR_Interrupted, /**< Waiting function was interrupted by a signal/interrupt */
EV_ERR_Thread_Over, /**< Exceeded the number of threads in the process */
EV_ERR_Invalid_Thread, /**< Invalid thread ID */
EV_ERR_Fatal, /**< Fatal error */

/** @brief Return value type for event library functions */
typedef INT32 EV_ERR;
/** @brief Event type
 *
 * Used in the type field of EV_Event structs.
 */
typedef UINT32 EV_Type;

/** @brief Bit value representing the type of event */
EV_EVENT_None = 0x00000000,

/** Flag event: Judged by EV_EVENT_IS_FLAG() */
EV_EVENT_Flag = 0x0001,
#define EV_EVENT_IS_FLAG(tp) (((tp) & EV_EVENT_Flag) != 0)

/** Message event: Judged by EV_EVENT_IS_MESSAGE() */
EV_EVENT_Message = 0x0002,
#define EV_EVENT_IS_MESSAGE(tp) (((tp) & EV_EVENT_Message) != 0)

/** 64bit flag event: Judged by EV_EVENT_IS_FLAG64()
 *
 * NOTE(review): EV_EVENT_Flag64 is 0x0003 == (EV_EVENT_Flag | EV_EVENT_Message),
 * so EV_EVENT_IS_FLAG64() also evaluates true for plain flag or message
 * events (its mask test only needs one shared bit set). Confirm whether a
 * dedicated bit (e.g. 0x0004) was intended before relying on this predicate
 * to discriminate event types.
 */
EV_EVENT_Flag64 = 0x0003,
#define EV_EVENT_IS_FLAG64(tp) (((tp) & EV_EVENT_Flag64) != 0)
/** @brief Flag event structure */
EV_ID flagID;/**< Flag ID */
UINT32 bits;/**< Bit pattern */

/** @brief 64bit flag event structure */
EV_ID flagID;/**< Flag ID */
UINT64 bits;/**< Bit pattern */

/** @brief Message event structure */
EV_ID queueID;/**< Queue ID */
UINT32 senderInfo;/**< Source information */
UINT32 length;/**< Number of bytes in the message */
UINT32 dummy;/**< Dummy for padding */
UINT8 message[EV_MAX_MESSAGE_LENGTH];/**< Message body */

/** @brief Event structure */
EV_Type type; /**< Event type */
EV_Flag flag; /**< Flag event structure */
EV_Flag64 flag64; /**< 64-bit flag event structure */
EV_Message message; /**< Message event structure */
} u; /**< Union of structures, one per event type */
/** @brief Message event queue type
 *
 * Specify the action to be taken when the queue overflows (more events
 * arrive while the queue is full). See evk_store_message for the handling
 * of each policy.
 */
enum ev_message_queue_type {
EV_MESSAGE_QUEUE_TYPE_BUSY,/**< Return a BUSY error to the source */
EV_MESSAGE_QUEUE_TYPE_FIFO,/**< Delete the oldest event to make room */
EV_MESSAGE_QUEUE_TYPE_REPLACE,/**< Replace the most recent event */

/** @brief Message event queue type
 *
 * @see ev_message_queue_type
 */
typedef UINT8 EV_Message_Queue_Type;
269 /** @addtogroup EV_in */
271 /** In Linux2.4, list_for_each_entry is not provided, so it is prepared by self (in 2.6)
273 #ifdef list_for_each_entry
274 #define __LINUX_26_OR_HIGHER
277 #ifndef __LINUX_26_OR_HIGHER /* linux v2.4 */
/* Linux 2.4 compatibility: iterate over the typed entries of a list
 * (same semantics as the 2.6 kernel's list_for_each_entry). */
#define list_for_each_entry(pos, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member), \
prefetch(pos->member.next); \
&pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member), \
prefetch(pos->member.next))

/* Deletion-safe variant: 'n' caches the next entry so 'pos' may be
 * removed from the list inside the loop body without breaking iteration. */
#define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member), \
n = list_entry(pos->member.next, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))
292 #else /* linux v2.6 */
294 #include <linux/jiffies.h>
296 #endif /* linux v2.6 */
298 #define EVK_assert(cond, mesg) \
300 printk(KERN_ALERT "[EVK]ASSERT(pid:%d): " #cond " at %s:%d; " \
301 #mesg "\n", current->pid, __FILE__, __LINE__); \
305 #define EVK_BUG(mesg) \
306 printk(KERN_ALERT "[EVK]BUG: " mesg); \
310 #define EVK_info0(s) printk(KERN_ALERT "[EVK]INFO: " s)
311 #define EVK_info1(s, t) printk(KERN_ALERT "[EVK]INFO: " s, t)
314 #define EVK_info1(s, t)
317 static int devmajor = EVK_DEV_MAJOR;
318 static int devminor = 0;
319 static int nrdevs = 1;
320 static struct cdev cdev;
321 static struct class *pClass;
323 DEFINE_SEMAPHORE(evk_mtx);
325 static int down_line;
326 #define EVK_mutex_lock() { \
328 down_pid = current->pid; \
329 down_line = __LINE__; \
331 #define EVK_mutex_unlock() (up(&evk_mtx))
333 #ifdef EVK_USE_KMALLOC
334 #define evk_malloc(s) kmalloc((s), GFP_KERNEL)
335 #define evk_free kfree
336 #else // use vmalloc (this is the default)
337 #define evk_malloc(s) vmalloc((s))
338 #define evk_free vfree
352 #define GET_USER_OR_KERNEL(to, from) \
355 if (mem == evk_mem_user) { \
356 err = __get_user((to), &(from)); \
364 #define PUT_USER_OR_KERNEL(value, to) \
367 if (mem == evk_mem_user) { \
368 err = __put_user((value), &(to)); \
377 /** @brief Common part of the flag structure and message queue structure */
378 #define EVK_COMMON_QUEUE_ELEMS \
379 struct list_head list; /**< List structure */ \
380 wait_queue_head_t wq_head; /**< Wait_queue of a process waiting for a queue */\
381 EV_ID queueID; /**< Flag ID/Queue ID */ \
382 UINT32 seq_num; /**< Order of event arrival */ \
383 pid_t read_pid; /**< Read process ID */ \
384 pid_t pid; /**< Owning process ID */
386 /** @brief Common part of the flag structure and message queue structure */
387 struct common_queue {
388 EVK_COMMON_QUEUE_ELEMS
391 /** @brief Flag structure */
393 EVK_COMMON_QUEUE_ELEMS
394 UINT32 value;/**< Flags value */
396 #define EVK_PFLAG(queue) ((struct flag*)queue)
398 /** @brief 64-bit flag structure */
400 EVK_COMMON_QUEUE_ELEMS
401 UINT64 value;/**< Flags value */
403 #define EVK_PFLAG64(queue) ((struct flag64*)queue)
405 /** @brief Message queue structure */
406 struct message_queue {
407 EVK_COMMON_QUEUE_ELEMS
408 UINT8 type;/**< Type */
409 UINT8 length;/**< Queue length */
410 UINT8 num;/**< Number of messages stored */
411 UINT8 readptr;/**< Next read position(0~length-1) */
412 UINT32 max_bytes;/**< -Maximum bytes per message */
413 UINT8 *message;/**< Message storage area (ring buffer) */
415 #define EVK_PQUEUE(queue) ((struct message_queue*)queue)
417 /** @brief Number of bytes to allocate per message
419 * This function allocates an area to store the number of bytes actually stored, the time of occurrence,
420 * and the senderInfo, in addition to the number of bytes specified by max_bytes.
422 #define EVK_message_block_size(max_bytes) (sizeof(UINT32) * 3 + (max_bytes))
426 /** @brief Maximum number of flags used by all systems */
427 #define EVK_MAX_FLAGS 48
428 /** @brief Maximum number of 64-bit flags used by all systems */
429 #define EVK_MAX_FLAG64S 4
430 /** @brief Maximum number of message event queues used by all systems */
431 /* M1SP BM3547 MESSAGE_QUEUES 128->144 */
432 /* M9AT BM2066 MESSAGE_QUEUES 144->218 */
433 #define EVK_MAX_MESSAGE_QUEUES 224
435 /** @brief Allocate flag structure statically */
436 static struct flag _flag_pool[EVK_MAX_FLAGS];
437 /** @brief Statically allocates a 64-bit flag structure */
438 static struct flag64 _flag64_pool[EVK_MAX_FLAG64S];
439 /** @brief Beginning of the list of unused flags */
440 static LIST_HEAD(flag_pool);
441 /** @brief Beginning of the list of unused 64-bit flags */
442 static LIST_HEAD(flag64_pool);
444 /** @brief Allocate message queue structure statically */
445 static struct message_queue _message_queue_pool[EVK_MAX_MESSAGE_QUEUES];
446 /** @brief Top of the list of unused message queues */
447 static LIST_HEAD(message_queue_pool);
449 /** @brief List of Flags/Message Queues in Use
451 * Connects the usage flag/message queue to a list for each hash value obtained from the ID.
452 * The hash value is the remainder of the ID divided by HASH_KEY.
454 static struct list_head queue_entry[HASH_KEY];
456 /** @brief Sequence number to use during automatic ID assignment */
457 static EV_ID sequence_id = 0;
459 /** @brief Number to be assigned in order of occurrence of the event */
460 static UINT32 sequence_number = 0;
/* ioctl argument: parameters for EVK_IOC_CREATE_MESSAGE_QUEUE
 * (copied from user space in evk_ioctl). */
EV_ID queueID;/**< Queue ID */
UINT32 max_bytes;/**< Maximum number of bytes for an event */
UINT8 length;/**< Queue length */
EV_Message_Queue_Type type;/**< Overflow policy (see ev_message_queue_type) */
} EVK_Message_Queue_Request;

/* ioctl argument: query used by evk_get_next_event; resides in user
 * space and is accessed with __get_user/__put_user. */
INT32 num; /**< Number of queue IDs to search */
EV_ID ids[EV_MAX_IDS_IN_PROCESS]; /**< Queue IDs to search */
EV_Event ev; /**< [OUT] First event that occurred */
} EVK_Next_Event_Query;
476 evk_get_queue_entry(struct list_head **entries)
479 *entries = queue_entry;
485 static __inline__ int
486 calc_hash(UINT32 val)
488 return val % HASH_KEY;
496 //EVK_assert(!list_empty(&flag_pool), "flag pool empty");
497 if (list_empty(&flag_pool)) {
498 printk("%s ERROR: flag pool empty\n", __func__);
501 queue = (struct flag *)flag_pool.next;
502 list_del_init(&(queue->list));
510 struct flag64 *queue;
511 //EVK_assert(!list_empty(&flag64_pool), "flag64 pool empty");
512 if (list_empty(&flag64_pool)) {
513 printk("%s ERROR: flag64 pool empty\n", __func__);
516 queue = (struct flag64 *)flag64_pool.next;
517 list_del_init(&(queue->list));
521 static __inline__ void
522 free_flag(struct flag *queue)
524 list_add((struct list_head *)queue, &flag_pool);
527 static __inline__ void
528 free_flag64(struct flag64 *queue)
530 list_add((struct list_head *)queue, &flag64_pool);
534 struct message_queue *
535 alloc_message_queue(void)
537 struct message_queue *queue;
538 //EVK_assert(!list_empty(&message_queue_pool), "message queue pool empty");
539 if (list_empty(&message_queue_pool)) {
540 printk("%s ERROR: message queue pool empty\n", __func__);
543 queue = (struct message_queue *)message_queue_pool.next;
544 list_del_init(&(queue->list));
545 queue->message = NULL;
549 static __inline__ void
550 free_message_queue(struct message_queue *queue)
552 if (queue->message != NULL) {
553 evk_free(queue->message);
554 queue->message = NULL;
556 list_add((struct list_head *)queue, &message_queue_pool);
/* Look up an in-use flag/flag64/message queue by its ID.
 * Returns the matching entry, or NULL when the ID is not registered
 * (callers such as evk_destroy_queue and evk_store_flag NULL-check the
 * result). Caller must hold evk_mtx. */
struct common_queue *
find_queue_entry(EV_ID queueID)
struct list_head *list;
struct common_queue *queue;
/* Bucket index is queueID % HASH_KEY (see calc_hash). */
int hash = calc_hash(queueID);
list = &(queue_entry[hash]);
list_for_each_entry(queue, list, list) { /* pgr0060 */ /* pgr0039 */
if (queue->queueID == queueID) {
575 static __inline__ void
576 attach_queue_entry(struct common_queue *queue, EV_ID queueID)
578 int hash = calc_hash(queueID);
579 list_add_tail((struct list_head *)queue, &(queue_entry[hash]));
582 static __inline__ void
583 detach_queue_entry(struct common_queue *queue)
585 list_del_init((struct list_head *)queue);
588 static __inline__ void
589 init_common_queue(struct common_queue *queue, EV_ID queueID)
591 queue->queueID = queueID;
592 queue->pid = current->pid;
594 init_waitqueue_head(&(queue->wq_head));
597 static __inline__ void
598 evk_init_flag(struct flag *queue, EV_ID queueID)
600 init_common_queue((struct common_queue *)queue, queueID);
604 static __inline__ void
605 evk_init_flag64(struct flag64 *queue, EV_ID queueID)
607 init_common_queue((struct common_queue *)queue, queueID);
611 static __inline__ int
612 evk_init_message_queue(struct message_queue *queue, EV_ID queueID,
613 UINT8 length, UINT32 max_bytes, UINT8 type)
615 init_common_queue((struct common_queue *)queue, queueID);
617 queue->length = length;
618 queue->max_bytes = max_bytes;
621 EVK_assert(queue->message == NULL, "message buffer was not freed");
622 queue->message = evk_malloc(length * EVK_message_block_size(max_bytes));
623 EVK_assert(queue->message != NULL, "can't alloc message buffer");
630 struct list_head *list;
631 struct common_queue *queue;
633 for(i = 0 ; i < HASH_KEY ; i++) {
634 list = &(queue_entry[i]);
635 if (!list_empty(list)) {
636 printk(KERN_ALERT "%d->", i);
637 list_for_each_entry(queue, list, list) { /* pgr0060 */ /* pgr0039 */
638 printk("%x[%x] ", queue->queueID, queue->seq_num);
/* Destroy a flag/flag64/message queue identified by queueID: unlink it
 * from the in-use hash table, wake any sleeping waiters so they can see
 * the queue disappear, invalidate it, then return it to its free pool
 * based on the type bits of the ID.
 * NOTE(review): this view is truncated — the NULL check after
 * find_queue_entry() is not visible here; confirm in the full source. */
evk_destroy_queue(EV_ID queueID)
struct common_queue *queue;
EVK_info1("flag destroy %x\n", queueID);
EVK_mutex_lock(); /*************************************/
queue = find_queue_entry(queueID);
detach_queue_entry(queue);
/* wake up processes before destruction */
wake_up_interruptible(&(queue->wq_head));
/* Re-initialize with EV_INVALID_ID so stale waiters can no longer match it. */
init_common_queue(queue, EV_INVALID_ID);
/* Return the entry to the pool that matches its ID's type bits. */
if (EV_ID_IS_FLAG(queueID)) {
free_flag((struct flag *)queue);
} else if (EV_ID_IS_FLAG64(queueID)) {
free_flag64((struct flag64 *)queue);
} else if (EV_ID_IS_QUEUE(queueID)) {
free_message_queue((struct message_queue *)queue);
EVK_mutex_unlock(); /*************************************/
/* open(): no queue is bound to this fd yet; private_data caches the
 * EV_ID later associated via the set-poll ioctl (see evk_set_poll). */
evk_open(struct inode *inode, struct file *file)
/* Recording of current, and protection against reads/deletes by others, would be required here. */
file->private_data = (void *)EV_INVALID_ID;
/* close(): destroy the queue bound to this fd (if any) so a process
 * exit cannot leak pool entries, then clear the binding. */
evk_close(struct inode *inode, struct file *file)
if (EV_ID_IS_VALID((EV_ID)file->private_data)) {
evk_destroy_queue((EV_ID)file->private_data);
file->private_data = (void *)EV_INVALID_ID;
698 evk_create_flag(EV_ID queueID)
702 EVK_info1("flag create %x\n", queueID);
704 EVK_mutex_lock(); /*************************************/
706 queue = (struct flag *)find_queue_entry(queueID);
712 queue = alloc_flag();
718 evk_init_flag(queue, queueID);
719 attach_queue_entry((struct common_queue *)queue, queueID);
723 EVK_mutex_unlock(); /***********************************/
728 evk_create_flag64(EV_ID queueID)
730 struct flag64 *queue;
732 EVK_info1("flag64 create %x\n", queueID);
734 EVK_mutex_lock(); /*************************************/
736 queue = (struct flag64 *)find_queue_entry(queueID);
742 queue = alloc_flag64();
748 evk_init_flag64(queue, queueID);
749 attach_queue_entry((struct common_queue *)queue, queueID);
753 EVK_mutex_unlock(); /***********************************/
758 evk_create_message_queue(EV_ID queueID, UINT8 length,
759 UINT32 max_bytes, EV_Message_Queue_Type type)
761 struct message_queue *queue;
763 EVK_info1("message create %x\n", queueID);
767 EVK_mutex_lock(); /*************************************/
769 queue = (struct message_queue *)find_queue_entry(queueID);
775 queue = alloc_message_queue();
781 err = evk_init_message_queue(queue, queueID, length, max_bytes, type);
783 attach_queue_entry((struct common_queue *)queue, queueID);
785 free_message_queue(queue);
790 EVK_mutex_unlock(); /***********************************/
799 if ((sequence_id & EV_RESERVED_BIT) != 0) {/* round to 1 */
801 EVK_info0("auto ID rounded\n");
808 evk_alloc_flagID(EV_ID *queueID)
812 EVK_mutex_lock(); /*************************************/
814 seq_id = get_seq_id();
815 seq_id |= (EV_ID_BIT | EV_FLAG_BIT | EV_AUTO_ID_BIT);
816 } while(find_queue_entry(seq_id) != NULL);
817 EVK_mutex_unlock(); /*************************************/
824 evk_alloc_flag64ID(EV_ID *queueID)
828 EVK_mutex_lock(); /*************************************/
830 seq_id = get_seq_id();
831 seq_id |= (EV_ID_BIT | EV_FLAG64_BIT | EV_AUTO_ID_BIT);
832 } while(find_queue_entry(seq_id) != NULL);
833 EVK_mutex_unlock(); /*************************************/
840 evk_alloc_queueID(EV_ID *queueID)
844 EVK_mutex_lock(); /*************************************/
846 seq_id = get_seq_id();
847 seq_id |= (EV_ID_BIT | EV_QUEUE_BIT | EV_AUTO_ID_BIT);
848 } while(find_queue_entry(seq_id) != NULL);
849 EVK_mutex_unlock(); /*************************************/
856 evk_store_flag(EV_Flag *ev, int mem)
863 if (GET_USER_OR_KERNEL(flagID, ev->flagID)) /* pgr0039 */
865 if (GET_USER_OR_KERNEL(bits, ev->bits)) /* pgr0039 */
868 EVK_mutex_lock(); /*************************************/
870 queue = (struct flag *)find_queue_entry(flagID); /* pgr0000 */
872 EVK_info1("set_flag: No such ID %x\n", flagID);
877 if (queue->value == 0) {
878 queue->seq_num = sequence_number++;
880 queue->value |= bits; /* pgr0000 */
882 wake_up_interruptible(&(queue->wq_head));
885 EVK_mutex_unlock(); /***********************************/
890 evk_store_flag64(EV_Flag64 *ev, int mem)
892 struct flag64 *queue;
897 if (GET_USER_OR_KERNEL(flagID, ev->flagID)) /* pgr0039 */
899 //GET_USER_OR_KERNEL(bits, ev->bits); /* pgr0039 */
900 if (mem == evk_mem_user) {
901 if (copy_from_user(&bits, &(ev->bits), sizeof(bits)))
907 EVK_mutex_lock(); /*************************************/
909 queue = (struct flag64 *)find_queue_entry(flagID); /* pgr0000 */
911 EVK_info1("set_flag64: No such ID %x\n", flagID);
916 if (queue->value == 0) {
917 queue->seq_num = sequence_number++;
919 queue->value |= bits; /* pgr0000 */
921 wake_up_interruptible(&(queue->wq_head));
924 EVK_mutex_unlock(); /***********************************/
/* Deliver a message event into a queue's ring buffer.
 * 'mem' selects whether 'ev' lives in user or kernel space (evk_mem_user
 * uses __get_user/copy_from_user; otherwise plain reads/memcpy).
 * On overflow the queue's policy decides: BUSY rejects, FIFO drops the
 * oldest entry, REPLACE overwrites the newest. Wakes any waiting readers. */
evk_store_message(EV_Message *ev, int mem)
struct message_queue *queue;
UINT32 length, senderInfo, seq;
/* Fetch header fields from the (possibly user-space) event first. */
if (GET_USER_OR_KERNEL(queueID, ev->queueID)) /* pgr0039 */
if (GET_USER_OR_KERNEL(length, ev->length)) /* pgr0039 */
if (GET_USER_OR_KERNEL(senderInfo, ev->senderInfo)) /* pgr0039 */
EVK_mutex_lock(); /*************************************/
queue = (struct message_queue *)find_queue_entry(queueID); /* pgr0000 */
EVK_info1("store_message: No such queueID %x\n", queueID);
/* Reject payloads larger than the per-message capacity fixed at create time. */
if (length > queue->max_bytes) { /* pgr0000 */
EVK_info0("store_message: message is too long for the queue");
/* Queue full: apply the overflow policy chosen at creation. */
if (queue->num == queue->length) {
switch(queue->type) {
case EV_MESSAGE_QUEUE_TYPE_BUSY:
EVK_info1("store_message: queue %x BUSY\n", queueID);
case EV_MESSAGE_QUEUE_TYPE_FIFO:
/* Drop the oldest entry by advancing the read pointer. */
queue->readptr %= queue->length;
case EV_MESSAGE_QUEUE_TYPE_REPLACE:
EVK_BUG("internal error in store_message\n");
/* Ring-buffer slot layout (see EVK_message_block_size):
 * [UINT32 length][UINT32 senderInfo][UINT32 seq][payload up to max_bytes] */
writeptr = (queue->readptr + queue->num) % queue->length;
ptr = queue->message + writeptr * EVK_message_block_size(queue->max_bytes);
memcpy(ptr, &length, sizeof(length));
ptr += sizeof(length);
memcpy(ptr, &senderInfo, sizeof(senderInfo));
ptr += sizeof(senderInfo);
seq = sequence_number++;
memcpy(ptr, &seq, sizeof(seq));
/* An empty queue takes the new message's sequence number as its own
 * (used for oldest-first arbitration in evk_get_next_event). */
if (queue->num == 0) {
queue->seq_num = seq;
if (mem == evk_mem_user) {
if (copy_from_user(ptr, ev->message, length)) {
memcpy(ptr, ev->message, length);
/* Wake readers blocked in wait_event_interruptible on this queue. */
wake_up_interruptible(&(queue->wq_head));
EVK_mutex_unlock(); /***********************************/
1021 evk_set_poll(struct file *filp, EV_ID queueID)
1023 struct common_queue *queue;
1026 EVK_mutex_lock(); /*************************************/
1028 queue = find_queue_entry(queueID);
1029 if (queue == NULL) {
1030 EVK_info1("set_poll: ID %x not found.\n", queueID);
1035 filp->private_data = (void *)queueID;
1038 EVK_mutex_unlock(); /*************************************/
1043 evk_get_flag_event(EV_Event *ev, int peek_only, int wait, int mem)
1045 struct flag *queue, *queue2;
1050 if (GET_USER_OR_KERNEL(flagID, ev->u.flag.flagID)) /* pgr0039 */
1055 queue = (struct flag *)find_queue_entry(flagID); /* pgr0000 */
1056 if (queue == NULL) {
1057 EVK_info1("get_flag: No such flag %x\n", flagID);
1062 if (queue->value != 0) {
1065 if (GET_USER_OR_KERNEL(bits, ev->u.flag.bits)) { /* pgr0039 */
1070 if (bits == 0 || ((bits & queue->value) != 0)) { /* pgr0000 */
1072 if (PUT_USER_OR_KERNEL(EV_EVENT_Flag, ev->type)) { /* pgr0039 */
1076 if (PUT_USER_OR_KERNEL(queue->value, ev->u.flag.bits)) { /* pgr0039 */
1082 queue->read_pid = current->pid;
1092 if (queue != NULL && wait != 0 && found == 0) {
1094 EVK_mutex_unlock(); /*************************************/
1097 = wait_event_interruptible(queue->wq_head,
1098 ((queue2 = (struct flag *)find_queue_entry(flagID)) == NULL
1099 || queue2->value != 0));
1101 EVK_mutex_lock(); /*************************************/
1103 if (wait_ret != 0) {
1104 EVK_info1("Interrupted while waiting for flag %x\n", flagID);
1106 } else if (queue2 == NULL) { /* pgr0039 */
1107 EVK_info1("flag %x was destroyed while waiting for it\n", flagID);
1117 evk_get_flag64_event(EV_Event *ev, int peek_only, int wait, int mem)
1119 struct flag64 *queue, *queue2;
1124 if (GET_USER_OR_KERNEL(flagID, ev->u.flag64.flagID)) /* pgr0039 */
1129 queue = (struct flag64 *)find_queue_entry(flagID); /* pgr0000 */
1130 if (queue == NULL) {
1131 EVK_info1("get_flag64: No such flag %x\n", flagID);
1136 if (queue->value != 0) {
1139 //GET_USER_OR_KERNEL(bits, ev->u.flag64.bits); /* pgr0039 */
1140 if (mem == evk_mem_user) {
1141 if (copy_from_user(&bits, &(ev->u.flag64.bits), sizeof(bits))) {
1146 bits = ev->u.flag64.bits;
1149 if (bits == 0 || ((bits & queue->value) != 0)) { /* pgr0000 */
1151 if (PUT_USER_OR_KERNEL(EV_EVENT_Flag64, ev->type)) { /* pgr0039 */
1155 if (PUT_USER_OR_KERNEL(queue->value, ev->u.flag64.bits)) { /* pgr0039 */
1161 queue->read_pid = current->pid;
1171 if (queue != NULL && wait != 0 && found == 0) {
1173 EVK_mutex_unlock(); /*************************************/
1176 = wait_event_interruptible(queue->wq_head,
1177 ((queue2 = (struct flag64 *)find_queue_entry(flagID)) == NULL
1178 || queue2->value != 0));
1180 EVK_mutex_lock(); /*************************************/
1182 if (wait_ret != 0) {
1183 EVK_info1("Interrupted while waiting for flag %x\n", flagID);
1185 } else if (queue2 == NULL) { /* pgr0039 */
1186 EVK_info1("flag %x was destroyed while waiting for it\n", flagID);
/* Remove the message at slot 'removeptr' from a queue's ring buffer.
 * Three cases, by the slot's offset from readptr:
 *  - head   (offset == 0): just advance readptr and refresh the queue's
 *    seq_num from the new head's stored sequence number;
 *  - tail   (offset == num): nothing to move;
 *  - middle: shift each following message back one slot to close the gap. */
static __inline__ void
remove_message_event(struct message_queue *queue, UINT8 removeptr)
offset = (int)removeptr - (int)(queue->readptr);
if (offset == 0) {/* To remove the head of the queue, just advance readptr by one */
queue->readptr %= queue->length;
if (queue->num > 0) {
/* Reset the queue's arrival order to that of the new head message. */
ptr = (queue->message
+ queue->readptr * EVK_message_block_size(queue->max_bytes));
/* Skip the length and senderInfo fields to reach the stored seq number. */
ptr += sizeof(UINT32) * 2;
memcpy(&(queue->seq_num), ptr, sizeof(UINT32));
/* removeptr wrapped around relative to readptr: normalize the offset. */
offset += queue->length;
if (offset == queue->num) {/* Do nothing to delete the end of the queue */
/* To delete a message in the middle of the queue, pack the following messages. */
size = EVK_message_block_size(queue->max_bytes);
for(i = 0 ; i < queue->num - offset ; i++, to++) {
to %= queue->length;
from = (to + 1) % queue->length;
pFrom = queue->message + from * size;
pTo = queue->message + to * size;
memcpy(pTo, pFrom, size);
1241 evk_get_message_event(EV_Event *ev, int peek_only, int wait, int mem)
1243 struct message_queue *queue, *queue2;
1251 if (GET_USER_OR_KERNEL(queueID, ev->u.message.queueID)) /* pgr0039 */
1255 queue = (struct message_queue *)find_queue_entry(queueID); /* pgr0000 */
1256 if (queue == NULL) {
1257 EVK_info1("get_message: No such queue %x\n", queueID);
1263 readptr = queue->readptr;
1264 for(i = 0 ; i < num ; i++, readptr = (readptr + 1) % queue->length) {
1265 UINT32 size, senderInfo, seq;
1266 UINT32 length, q_senderInfo;
1268 ptr = (queue->message
1269 + readptr * EVK_message_block_size(queue->max_bytes));
1271 memcpy(&size, ptr, sizeof(size));
1272 ptr += sizeof(size);
1273 memcpy(&senderInfo, ptr, sizeof(senderInfo));
1274 ptr += sizeof(senderInfo);
1275 memcpy(&seq, ptr, sizeof(seq));
1278 if (GET_USER_OR_KERNEL(length, ev->u.message.length)) { /* pgr0039 */
1282 if (GET_USER_OR_KERNEL(q_senderInfo, ev->u.message.senderInfo)) { /* pgr0039 */
1287 if (q_senderInfo == 0 && length == 0) { /* pgr0000 */
1289 } else if (q_senderInfo != 0 && q_senderInfo == senderInfo) {
1291 } else if (length > 0 && size >= length) { /* pgr0000 */
1293 if (mem == evk_mem_user) {
1295 compbytes = evk_malloc(length);
1296 if (compbytes != NULL) {
1297 if (copy_from_user(compbytes, &(ev->u.message.message), length)) {
1299 evk_free(compbytes);
1302 if (memcmp(ptr, compbytes, length) == 0) {
1305 evk_free(compbytes);
1308 if (memcmp(ptr, ev->u.message.message, length) == 0) {
1316 if (PUT_USER_OR_KERNEL(EV_EVENT_Message, ev->type)) { /* pgr0039 */
1320 if (PUT_USER_OR_KERNEL(size, ev->u.message.length)) { /* pgr0039 */
1324 if (PUT_USER_OR_KERNEL(senderInfo, ev->u.message.senderInfo)) { /* pgr0039 */
1328 if (mem == evk_mem_user) {
1329 if (copy_to_user(ev->u.message.message, ptr, size)) {
1334 memcpy(ev->u.message.message, ptr, size);
1337 queue->read_pid = current->pid;
1341 remove_message_event(queue, readptr);
1349 if (queue != NULL && wait != 0 && matched == 0) {
1351 EVK_mutex_unlock(); /*************************************/
1353 = wait_event_interruptible(queue->wq_head,
1354 ((queue2 = (struct message_queue *)find_queue_entry(queueID))==NULL
1355 || queue2->num > 0));
1357 EVK_mutex_lock(); /*************************************/
1359 if (wait_ret != 0) {
1360 EVK_info1("Interrupted while waiting for queue %x\n", queueID);
1362 } else if (queue2 == NULL) { /* pgr0039 */
1363 EVK_info1("queue %x was destroyed while waiting for it\n", queueID);
1374 evk_get_event(EV_Event *ev, int peek_only, int wait, int mem)
1379 if (GET_USER_OR_KERNEL(type, ev->type)) /* pgr0039 */
1381 if (PUT_USER_OR_KERNEL(EV_EVENT_None, ev->type)) /* pgr0039 */
1384 switch(type) { /* pgr0000 */
1386 EVK_mutex_lock(); /*************************************/
1387 ret = evk_get_flag_event(ev, peek_only, wait, mem);
1388 EVK_mutex_unlock(); /*************************************/
1391 case EV_EVENT_Flag64:
1392 EVK_mutex_lock(); /*************************************/
1393 ret = evk_get_flag64_event(ev, peek_only, wait, mem);
1394 EVK_mutex_unlock(); /*************************************/
1397 case EV_EVENT_Message:
1398 EVK_mutex_lock(); /*************************************/
1399 ret = evk_get_message_event(ev, peek_only, wait, mem);
1400 EVK_mutex_unlock(); /*************************************/
/*
 * evk_get_next_event(): among the caller-supplied list of flag/queue IDs,
 * find the pending event with the oldest sequence number and fetch it into
 * query->ev.  'query' points to user memory (__get_user/__put_user used).
 * NOTE(review): 'num' comes from userspace and sizes the copy_from_user
 * into the kmalloc'd 'ids' buffer (EV_MAX_IDS_IN_PROCESS entries); the
 * bound check on num and the kmalloc NULL check are not visible in this
 * view — confirm both exist upstream of line 1426.
 */
1410 evk_get_next_event(EVK_Next_Event_Query *query /* user */, int peek_only)
1413 int i, num, ret, first, found;
1414 struct common_queue *queue;
1415 UINT32 seq_oldest = 0;
/* Scratch array for the caller's ID list. */
1417 ids = (EV_ID *)kmalloc( (sizeof(EV_ID)*EV_MAX_IDS_IN_PROCESS), GFP_KERNEL );
1422 if (__get_user(num, &(query->num))) { /* pgr0039 */
1426 if (copy_from_user(&ids[0], query->ids, num * sizeof(EV_ID))) { /* pgr0039 */
/* Default answer: no event. */
1430 if (__put_user(EV_EVENT_None, &(query->ev.type))) { /* pgr0039 */
1439 EVK_mutex_lock(); /*************************************/
/* Scan every requested ID; remember the pending entry with the oldest
 * (wrap-safe, via time_before) sequence number. */
1441 for(i = 0 ; i < num /* pgr0039 */ ; i++) {
1442 queue = find_queue_entry(ids[i]);
1443 if (queue != NULL) {/* Have the specified queue ID */
1444 if ((EV_ID_IS_FLAG(ids[i])
1445 && ((struct flag *)queue)->value != 0)
1446 || (EV_ID_IS_FLAG64(ids[i])
1447 && ((struct flag64 *)queue)->value != 0)
1448 || (EV_ID_IS_QUEUE(ids[i])
1449 && ((struct message_queue *)queue)->num > 0)) {/*There are events.*/
1450 /* Compare with time_before macros for round 0 */
1451 if (first || time_before((unsigned long)queue->seq_num, /* pgr0006 */ /* pgr0039 */
1452 (unsigned long)seq_oldest)) {
1454 seq_oldest = queue->seq_num;
/* Found the oldest pending entry: write its ID into the matching union
 * member, then fetch the event via the type-specific handler (no wait). */
1462 if (EV_ID_IS_FLAG(ids[found])) {
1463 if (__put_user(ids[found], &(query->ev.u.flag.flagID))) { /* pgr0039 */
1467 ret = evk_get_flag_event(&(query->ev), peek_only, 0, evk_mem_user);
1468 } else if (EV_ID_IS_FLAG64(ids[found])) {
1469 if (__put_user(ids[found], &(query->ev.u.flag64.flagID))) { /* pgr0039 */
1473 ret = evk_get_flag64_event(&(query->ev), peek_only, 0, evk_mem_user);
1474 } else if (EV_ID_IS_QUEUE(ids[found])) {
1475 if (__put_user(ids[found], &(query->ev.u.message.queueID))) { /* pgr0039 */
1479 ret = evk_get_message_event(&(query->ev), peek_only, 0, evk_mem_user);
1484 EVK_mutex_unlock(); /*************************************/
/*
 * evk_ioctl(): unlocked_ioctl entry point.  Decodes the EVK_IOC_* command
 * and forwards to the corresponding evk_* helper.  'arg' is either an
 * EV_ID passed by value or a user-space pointer, depending on the command.
 * The access_ok() calls are gated on kernel >= 5.0, where the
 * VERIFY_READ/VERIFY_WRITE type argument was removed.
 */
1491 evk_ioctl(struct file *filp, unsigned int cmd,
1494 EVK_Message_Queue_Request mesq;
1495 int peek_only, wait;
/* --- commands taking an EV_ID directly in 'arg' --- */
1502 case EVK_IOC_CREATE_FLAG:
1503 queueID = (EV_ID)arg;
1504 ret = evk_create_flag(queueID);
1507 case EVK_IOC_CREATE_FLAG64:
1508 queueID = (EV_ID)arg;
1509 ret = evk_create_flag64(queueID);
/* --- create message queue: copy an EVK_Message_Queue_Request from user --- */
1512 case EVK_IOC_CREATE_MESSAGE_QUEUE:
1513 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1514 if (access_ok(arg, sizeof(mesq))) { /* pgr0039 */
1516 if (access_ok(VERIFY_READ, arg, sizeof(mesq))) { /* pgr0039 */
/* NOTE(review): the (EV_Flag *) cast looks like a copy/paste slip — the
 * destination is an EVK_Message_Queue_Request.  Harmless at runtime since
 * copy_from_user takes const void __user *, but the cast is misleading. */
1518 if (copy_from_user(&mesq, (EV_Flag *)arg, sizeof(mesq))) {
1523 ret = evk_create_message_queue(mesq.queueID, mesq.length,
1524 mesq.max_bytes, mesq.type);
/* --- auto-ID allocators: write the new ID back to the user pointer --- */
1530 case EVK_IOC_ALLOC_FLAG_ID:
1531 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1532 if (access_ok(arg, sizeof(queueID))) { /* pgr0039 */
1534 if (access_ok(VERIFY_WRITE, arg, sizeof(queueID))) { /* pgr0039 */
1536 evk_alloc_flagID(&queueID);
1537 if (put_user(queueID, (EV_ID *)arg)) { /* pgr0039 */
1547 case EVK_IOC_ALLOC_FLAG64_ID:
1548 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1549 if (access_ok(arg, sizeof(queueID))) { /* pgr0039 */
1551 if (access_ok(VERIFY_WRITE, arg, sizeof(queueID))) { /* pgr0039 */
1553 evk_alloc_flag64ID(&queueID);
1554 if (put_user(queueID, (EV_ID *)arg)) { /* pgr0039 */
1564 case EVK_IOC_ALLOC_QUEUE_ID:
1565 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1566 if (access_ok(arg, sizeof(queueID))) { /* pgr0039 */
1568 if (access_ok(VERIFY_WRITE, arg, sizeof(queueID))) { /* pgr0039 */
1570 evk_alloc_queueID(&queueID);
1571 if (put_user(queueID, (EV_ID *)arg)) { /* pgr0039 */
1581 case EVK_IOC_DESTROY_QUEUE:
1582 queueID = (EV_ID)arg;
1583 ret = evk_destroy_queue(queueID);
/* --- event senders: 'arg' points to an EV_Flag/EV_Flag64/EV_Message --- */
1586 case EVK_IOC_STORE_FLAG:
1587 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1588 if (access_ok(arg, sizeof(EV_Flag))) { /* pgr0039 */
1590 if (access_ok(VERIFY_READ, arg, sizeof(EV_Flag))) { /* pgr0039 */
1592 ret = evk_store_flag((EV_Flag *)arg, evk_mem_user);
1598 case EVK_IOC_STORE_FLAG64:
1599 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1600 if (access_ok(arg, sizeof(EV_Flag64))) { /* pgr0039 */
1602 if (access_ok(VERIFY_READ, arg, sizeof(EV_Flag64))) { /* pgr0039 */
1604 ret = evk_store_flag64((EV_Flag64 *)arg, evk_mem_user);
1610 case EVK_IOC_STORE_MESSAGE:
1611 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1612 if (access_ok(arg, sizeof(EV_Message))) { /* pgr0039 */
1614 if (access_ok(VERIFY_READ, arg, sizeof(EV_Message))) { /* pgr0039 */
1616 ret = evk_store_message((EV_Message *)arg, evk_mem_user);
/* --- poll registration and event retrieval --- */
1622 case EVK_IOC_SET_POLL:
1623 queueID = (EV_ID)arg;
1624 ret = evk_set_poll(filp, queueID);
/* PEEK/WAIT/GET share one body; the peek_only/wait assignments for each
 * label are elided from this view. */
1627 case EVK_IOC_PEEK_EVENT:
1633 case EVK_IOC_WAIT_EVENT:
1639 case EVK_IOC_GET_EVENT:
1643 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1644 if (access_ok(arg, sizeof(EV_Event))) { /* pgr0039 */
1646 if (access_ok(VERIFY_WRITE, arg, sizeof(EV_Event))) { /* pgr0039 */
1648 ret = evk_get_event((EV_Event *)arg, peek_only, wait, evk_mem_user);
1654 case EVK_IOC_PEEK_NEXT_EVENT:
1659 case EVK_IOC_GET_NEXT_EVENT:
1662 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1663 if (access_ok(arg, sizeof(EVK_Next_Event_Query))) { /* pgr0039 */
1665 if (access_ok(VERIFY_WRITE, arg, sizeof(EVK_Next_Event_Query))) { /* pgr0039 */
1667 ret = evk_get_next_event((EVK_Next_Event_Query *)arg, peek_only);
1673 case EVK_IOC_DEBUG_LIST:
/*
 * evk_poll(): poll/select support.  The queue ID to watch was stashed in
 * filp->private_data by EVK_IOC_SET_POLL; report readable when the flag
 * value is non-zero or the message queue is non-empty.
 */
1689 evk_poll(struct file *filp, poll_table *wait)
1691 unsigned int ret = 0;
1692 struct common_queue *queue;
1694 EV_ID queueID = (EV_ID)(filp->private_data);
1695 // Returns errors without stopping at assert if queueID is invalid
1696 // (Troubleshooting for Continuous Printing)
1697 if (!EV_ID_IS_VALID(queueID)) {
/* NOTE(review): printk without a KERN_* level — consider pr_err(). */
1698 printk("evk_poll ERROR: invalid queueID=%x\n", queueID);
1699 return POLLERR|POLLHUP;
1701 //EVK_assert(EV_ID_IS_VALID(queueID), "poll: flag/queueID not set");
1703 EVK_mutex_lock();/*****************************************/
1705 queue = find_queue_entry(queueID);
1706 if (queue == NULL) {
1707 EVK_info1("poll: No such flag/queueID %x\n", queueID);
1708 ret = POLLERR|POLLHUP;
/* Register on the queue's wait queue, then test current readiness.
 * (The ret |= POLLIN-style assignments are elided from this view.) */
1712 poll_wait(filp, &(queue->wq_head), wait);
1714 if (EV_ID_IS_FLAG(queueID)) {
1715 if (((struct flag *)queue)->value != 0) {
1718 } else if (EV_ID_IS_FLAG64(queueID)) {
1719 if (((struct flag64 *)queue)->value != 0) {
1723 if (((struct message_queue *)queue)->num > 0) {
1729 EVK_mutex_unlock(); /***************************************/
1734 /** List of system call corresponding function registrations */
/* Character-device file_operations: ioctl + release visible here;
 * .open/.poll entries are elided from this view. */
1735 static struct file_operations evk_fops = {
1737 .release = evk_close,
1738 .unlocked_ioctl = evk_ioctl,
1742 #ifdef ENABLE_PROC_FS
/*
 * evk_procFS_show(): seq_file backend shared by the ev_flag / ev_flag64 /
 * ev_queue proc entries.  'mode' (evk_enum_flag/flag64/queue) selects which
 * entry kind is printed; all hash buckets are walked and non-matching
 * kinds are skipped.
 */
1744 evk_procFS_show(struct seq_file *m, int mode)
1747 struct list_head *list, *entries;
1748 struct common_queue *queue;
1750 seq_printf(m, "[ev library status ");
/* Per-mode column headers (switch labels partially elided in this view). */
1754 seq_printf(m, "(flag)]\n");
1755 seq_printf(m, "PID moduleID flagID[hash] value\n");
1757 case evk_enum_flag64:
1758 seq_printf(m, "(flag64)]\n");
1759 seq_printf(m, "PID moduleID flagID[hash] value\n");
1761 case evk_enum_queue:
1762 seq_printf(m, "(queue)]\n");
1763 seq_printf(m, "PID moduleID queueID[hash] maxbytes remain type\n");
1769 num = evk_get_queue_entry(&entries);
1771 for (i = 0 ; i < num ; i++) {
1772 list = &(entries[i]);
1773 if (!list_empty(list)) {
1774 list_for_each_entry(queue, list, list) {
/* Skip entries whose kind does not match the requested mode. */
1775 if ((mode == evk_enum_flag && (!EV_ID_IS_FLAG(queue->queueID)))
1776 || (mode == evk_enum_flag64 && (!EV_ID_IS_FLAG64(queue->queueID)))
1777 || (mode == evk_enum_queue && (!EV_ID_IS_QUEUE(queue->queueID))))
/* Common columns: owner PID, module ID (bits 8..23 of the ID), full ID
 * plus its hash bucket. */
1782 seq_printf(m, "%08d ", queue->pid);
1783 seq_printf(m, "%05d(%04x) ", ((queue->queueID & 0x00ffff00) >> 8), ((queue->queueID & 0x00ffff00) >> 8));
1784 seq_printf(m, "0x%08x[%2d] ", queue->queueID, calc_hash(queue->queueID));
/* Kind-specific payload columns. */
1788 seq_printf(m, "0x%x", EVK_PFLAG(queue)->value);
1791 case evk_enum_flag64:
1792 seq_printf(m, "0x%llx", EVK_PFLAG64(queue)->value);
1795 case evk_enum_queue:
1796 seq_printf(m, "%04d %02d ", EVK_PQUEUE(queue)->max_bytes, EVK_PQUEUE(queue)->length);
1797 seq_printf(m, "%02d ", EVK_PQUEUE(queue)->num);
1798 seq_printf(m, "%d ", EVK_PQUEUE(queue)->type);
1801 seq_printf(m, "\n");
/* seq_file show callbacks: bind evk_procFS_show() to each entry kind. */
1811 evk_procFS_flag_show(struct seq_file *m, void *v)
1813 return evk_procFS_show(m, evk_enum_flag);
1817 evk_procFS_flag64_show(struct seq_file *m, void *v)
1819 return evk_procFS_show(m, evk_enum_flag64);
1823 evk_procFS_queue_show(struct seq_file *m, void *v)
1825 return evk_procFS_show(m, evk_enum_queue);
/* open callbacks: single_open() pairs each proc file with its show fn. */
1829 evk_procFS_flag_open(struct inode *inode, struct file *file)
1831 return single_open(file, evk_procFS_flag_show, NULL);
1835 evk_procFS_flag64_open(struct inode *inode, struct file *file)
1837 return single_open(file, evk_procFS_flag64_show, NULL);
1841 evk_procFS_queue_open(struct inode *inode, struct file *file)
1843 return single_open(file, evk_procFS_queue_show, NULL);
/*
 * Proc entry operation tables.  Kernels >= 5.6 use struct proc_ops
 * (HAVE_PROC_OPS, defined near the top of this file); older kernels use
 * struct file_operations.  Both variants wire the same open/read/lseek/
 * release callbacks for each of the three entries.
 */
1846 #ifdef HAVE_PROC_OPS
1847 static struct proc_ops evk_proc_flag_fops = {
1848 .proc_open = evk_procFS_flag_open,
1849 .proc_read = seq_read,
1850 .proc_lseek = seq_lseek,
1851 .proc_release = single_release,
1854 static struct file_operations evk_proc_flag_fops = {
1855 .owner = THIS_MODULE,
1856 .open = evk_procFS_flag_open,
1858 .llseek = seq_lseek,
1859 .release = single_release,
1861 #endif /* HAVE_PROC_OPS */
1863 #ifdef HAVE_PROC_OPS
1864 static struct proc_ops evk_proc_flag64_fops = {
1865 .proc_open = evk_procFS_flag64_open,
1866 .proc_read = seq_read,
1867 .proc_lseek = seq_lseek,
1868 .proc_release = single_release,
1871 static struct file_operations evk_proc_flag64_fops = {
1872 .owner = THIS_MODULE,
1873 .open = evk_procFS_flag64_open,
1875 .llseek = seq_lseek,
1876 .release = single_release,
1878 #endif /* HAVE_PROC_OPS */
1880 #ifdef HAVE_PROC_OPS
1881 static struct proc_ops evk_proc_queue_fops = {
1882 .proc_open = evk_procFS_queue_open,
1883 .proc_read = seq_read,
1884 .proc_lseek = seq_lseek,
1885 .proc_release = single_release,
1888 static struct file_operations evk_proc_queue_fops = {
1889 .owner = THIS_MODULE,
1890 .open = evk_procFS_queue_open,
1892 .llseek = seq_lseek,
1893 .release = single_release,
1895 #endif /* HAVE_PROC_OPS */
1896 #endif /*ENABLE_PROC_FS*/
/*
 * EVK_init() body fragment: registers the character device region, adds
 * the cdev, seeds the flag/flag64/message-queue free pools and hash
 * buckets, creates the /proc/driver entries, and creates the device node
 * /dev/agl/evk via class_create()/device_create().
 */
1902 #ifdef CONFIG_PROC_FS
1903 #ifdef ENABLE_PROC_FS
1904 struct proc_dir_entry *ret;
1905 #endif /* ENABLE_PROC_FS */
1906 #endif /* CONFIG_PROC_FS */
1910 dev = MKDEV(devmajor, devminor);
1911 err = register_chrdev_region(dev, nrdevs, EVK_NAME);
1913 EVK_info1("register_chrdev_region error %d\n", -err);
1917 cdev_init(&cdev, &evk_fops);
1918 cdev.owner = THIS_MODULE;
1919 cdev.ops = &evk_fops;
1921 err = cdev_add(&cdev, dev, 1);
1923 EVK_info1("cdev_add error %d\n", -err);
1927 /* Initialization */
/* Seed the three free-object pools and the hash-bucket list heads. */
1928 for(i = 0 ; i < EVK_MAX_FLAGS ; i++) {
1929 list_add_tail((struct list_head *)&(_flag_pool[i]),
1932 for(i = 0 ; i < EVK_MAX_FLAG64S ; i++) {
1933 list_add_tail((struct list_head *)&(_flag64_pool[i]),
1936 for(i = 0 ; i < EVK_MAX_MESSAGE_QUEUES ; i++) {
1937 list_add_tail((struct list_head *)&(_message_queue_pool[i]),
1938 &message_queue_pool);
1940 for(i = 0 ; i < HASH_KEY ; i++) {
1941 INIT_LIST_HEAD(&(queue_entry[i]));
1944 #ifdef CONFIG_PROC_FS
1945 #ifdef ENABLE_PROC_FS
1946 ret = proc_create("driver/ev_flag", 0, NULL, &evk_proc_flag_fops);
1948 EVK_info1("Unable to initialize /proc entry %d\n", -err);
1952 ret = proc_create("driver/ev_flag64", 0, NULL, &evk_proc_flag64_fops);
1954 EVK_info1("Unable to initialize /proc entry %d\n", -err);
1958 ret = proc_create("driver/ev_queue", 0, NULL, &evk_proc_queue_fops);
1960 EVK_info1("Unable to initialize /proc entry %d\n", -err);
1963 #endif /* ENABLE_PROC_FS */
1964 #endif /* CONFIG_PROC_FS */
/* NOTE(review): two-argument class_create(THIS_MODULE, name) was removed
 * in kernel 6.4 (now class_create(name)); error returns of class_create/
 * device_create are not checked in the visible lines — verify upstream. */
1966 pClass = class_create(THIS_MODULE, EVK_NAME);
1967 device_create(pClass, NULL, dev, NULL, "agl/"EVK_NAME);
/*
 * EVK_exit() body fragment: tears down in reverse order of EVK_init —
 * device node, class, chrdev region, then the /proc/driver entries.
 * NOTE(review): remove_proc_entry's parent argument is passed as 0;
 * NULL would be the conventional spelling for the pointer parameter.
 */
1973 //#ifndef CONFIG_COMBINE_MODULES
1978 dev_t dev = MKDEV(devmajor, devminor);
1979 device_destroy(pClass, dev);
1980 class_destroy(pClass);
1984 unregister_chrdev_region(dev, nrdevs);
1986 remove_proc_entry( "driver/ev_flag", 0 );
1987 remove_proc_entry( "driver/ev_flag64", 0 );
1988 remove_proc_entry( "driver/ev_queue", 0 );
/*
 * EVK_create_flag_in()/EVK_create_flag64_in(): thin adapters translating
 * the evk_create_* kernel error codes into EV_ERR values
 * (-EEXIST -> EV_ERR_Exist, other negatives -> EV_ERR_Fatal).
 */
1995 EVK_create_flag_in(EV_ID flagID)
1997 int ret = evk_create_flag(flagID);
1999 if (ret == -EEXIST) {
2000 return EV_ERR_Exist;
2001 } else if (ret < 0) {
2002 return EV_ERR_Fatal;
2009 EVK_create_flag64_in(EV_ID flagID)
2011 int ret = evk_create_flag64(flagID);
2013 if (ret == -EEXIST) {
2014 return EV_ERR_Exist;
2015 } else if (ret < 0) {
2016 return EV_ERR_Fatal;
2022 /** @see EV_create_flag */
/* Public creators: validate the ID kind (and reject auto-ID space) before
 * delegating to the _in adapters above. */
2024 EVK_create_flag(EV_ID flagID)
2026 if (!EV_ID_IS_FLAG(flagID) || EV_ID_IS_AUTO_ID(flagID)) {
2027 return EV_ERR_Invalid_ID;
2029 return EVK_create_flag_in(flagID);
2032 /** @see EV_create_flag64 */
2034 EVK_create_flag64(EV_ID flagID)
2036 if (!EV_ID_IS_FLAG64(flagID) || EV_ID_IS_AUTO_ID(flagID)) {
2037 return EV_ERR_Invalid_ID;
2039 return EVK_create_flag64_in(flagID);
/* EVK_create_queue_in(): error-code adapter for message-queue creation
 * (-EEXIST -> EV_ERR_Exist, other negatives -> EV_ERR_Fatal). */
2043 EVK_create_queue_in(EV_ID queueID, UINT8 length, UINT16 max_bytes,
2044 EV_Message_Queue_Type type)
2046 int ret = evk_create_message_queue(queueID, length, max_bytes, type);
2048 if (ret == -EEXIST) {
2049 return EV_ERR_Exist;
2050 } else if (ret < 0) {
2051 return EV_ERR_Fatal;
2057 /** @see EV_create_queue */
/* Public creator: validates the queue-kind ID (auto-ID space rejected). */
2059 EVK_create_queue(EV_ID queueID, UINT8 length, UINT16 max_bytes,
2060 EV_Message_Queue_Type type)
2062 if (!EV_ID_IS_QUEUE(queueID) || EV_ID_IS_AUTO_ID(queueID)) {
2063 return EV_ERR_Invalid_ID;
2065 return EVK_create_queue_in(queueID, length, max_bytes, type);
2068 /** @see EV_create_flag_auto_id */
/* Auto-ID creators: allocate a fresh ID, then create the object with it.
 * On creation failure the queue variant resets *queueID to EV_NO_ID. */
2070 EVK_create_flag_auto_id(/* OUT */EV_ID *flagID)
2073 EVK_assert(flagID != NULL, "NULL pointer was specified");
2075 if (evk_alloc_flagID(flagID) < 0) {
2076 return EV_ERR_Fatal;
2079 err = EVK_create_flag_in(*flagID);
2086 /** @see EV_create_flag64_auto_id */
2088 EVK_create_flag64_auto_id(/* OUT */EV_ID *flagID)
2091 EVK_assert(flagID != NULL, "NULL pointer was specified");
2093 if (evk_alloc_flag64ID(flagID) < 0) {
2094 return EV_ERR_Fatal;
2097 err = EVK_create_flag64_in(*flagID);
2104 /** @see EV_create_queue_auto_id */
2106 EVK_create_queue_auto_id(/* OUT */EV_ID *queueID, UINT8 length,
2107 UINT16 max_bytes, EV_Message_Queue_Type type)
2110 EVK_assert(queueID != NULL, "NULL pointer was specified");
2112 if (evk_alloc_queueID(queueID) < 0) {
2113 return EV_ERR_Fatal;
2116 err = EVK_create_queue_in(*queueID, length, max_bytes, type);
2118 *queueID = EV_NO_ID;
2123 /** @see EV_destroy_flag */
/* Destroys any flag/flag64/queue entry; -ENOENT maps to EV_ERR_Invalid_ID. */
2125 EVK_destroy_flag(EV_ID flagID)
2128 err = evk_destroy_queue(flagID);
2130 if (err == -ENOENT) {
2131 return EV_ERR_Invalid_ID;
2132 } else if (err < 0) {
2133 return EV_ERR_Fatal;
2139 /** @see EV_destroy_queue */
/* Queues and flags share one namespace/teardown path. */
2141 EVK_destroy_queue(EV_ID queueID)
2143 return EVK_destroy_flag(queueID);
2146 /* Sending the event */
2147 /** @see EV_set_flag */
/* Build an EV_Flag on the stack and store it from kernel memory;
 * -ENOENT -> EV_ERR_Invalid_ID, other negatives -> EV_ERR_Fatal. */
2149 EVK_set_flag(EV_ID flagID, UINT32 bits)
2154 if (!EV_ID_IS_FLAG(flagID)) {
2155 return EV_ERR_Invalid_ID;
2157 flag.flagID = flagID;
2160 ret = evk_store_flag(&flag, evk_mem_kernel);
2161 if (ret == -ENOENT) {
2162 return EV_ERR_Invalid_ID;
2163 } else if (ret < 0) {
2164 return EV_ERR_Fatal;
2170 /** @see EV_set_flag64 */
/* 64-bit variant of EVK_set_flag(). */
2172 EVK_set_flag64(EV_ID flagID, UINT64 bits)
2177 if (!EV_ID_IS_FLAG64(flagID)) {
2178 return EV_ERR_Invalid_ID;
2180 flag.flagID = flagID;
2183 ret = evk_store_flag64(&flag, evk_mem_kernel);
2184 if (ret == -ENOENT) {
2185 return EV_ERR_Invalid_ID;
2186 } else if (ret < 0) {
2187 return EV_ERR_Fatal;
2193 /** @see EV_send_message */
/*
 * Kernel-side message send: heap-allocates a scratch EV_Message (too large
 * for the stack), fills it, and stores it via evk_store_message().
 * Error mapping: -ENOENT -> Invalid_ID, -EBUSY -> Busy, other < 0 -> Fatal.
 */
2195 EVK_send_message(EV_ID queueID, UINT16 bytes, const void *message,
2198 EV_Message *msg = NULL;
2202 msg = evk_malloc( sizeof( EV_Message ) );
/* Allocation failure path (NULL check elided from this view). */
2205 ev_ret = EV_ERR_Fatal;
2209 if (!EV_ID_IS_QUEUE(queueID)) {
2210 ev_ret = EV_ERR_Invalid_ID;
2213 EVK_assert(message != NULL, "NULL pointer was specified");
2214 EVK_assert(bytes <= EV_MAX_MESSAGE_LENGTH, "send_message: message too long");
2216 msg->queueID = queueID;
2217 msg->senderInfo = senderInfo;
2218 msg->length = bytes;
2219 memcpy(msg->message, message, bytes);
2221 ret = evk_store_message(msg, evk_mem_kernel);
2222 if (ret == -ENOENT) {
2223 ev_ret = EV_ERR_Invalid_ID;
2224 } else if (ret == -EBUSY) {
2225 ev_ret = EV_ERR_Busy;
2226 } else if (ret < 0) {
2227 ev_ret = EV_ERR_Fatal;
2240 /* Event acquisition(Order of arrival time) */
2241 //EV_ERR EV_get_next_event(/* OUT */EV_Event *ev);
/*
 * EVK_get_flag_in(): kernel-side flag fetch shared by get/wait/peek.
 * Heap-allocates a scratch EV_Event, asks evk_get_event() for a flag
 * event, and copies the result into *flag.  flag->flagID is pre-set to
 * EV_NO_ID so "no event" is distinguishable on return.
 */
2244 EVK_get_flag_in(EV_ID flagID, EV_Flag *flag, int peek_only, int wait)
2246 EV_Event *ev = NULL;
2250 ev = evk_malloc( sizeof( EV_Event ) );
/* Allocation failure path (NULL check elided from this view). */
2253 ev_ret = EV_ERR_Fatal;
2257 EVK_assert(flag != NULL, "get_flag: NULL pointer was specified");
2258 flag->flagID = EV_NO_ID;
2261 if (!EV_ID_IS_FLAG(flagID)) {
2262 ev_ret = EV_ERR_Invalid_ID;
2266 ev->type = EV_EVENT_Flag;
2267 ev->u.flag.flagID = flagID;
2268 ev->u.flag.bits = 0;
2270 ret = evk_get_event(ev, peek_only, wait, evk_mem_kernel);
2273 if (ret == -ENOENT) {
2274 ev_ret = EV_ERR_Invalid_ID;
2275 } else if (ret == -EINTR) {
2276 ev_ret = EV_ERR_Interrupted;
2278 ev_ret = EV_ERR_Fatal;
/* Copy out only if a flag event actually arrived. */
2283 if (ev->type == EV_EVENT_Flag) {
2284 flag->flagID = ev->u.flag.flagID;
2285 flag->bits = ev->u.flag.bits;
/*
 * EVK_get_flag64_in(): 64-bit counterpart of EVK_get_flag_in(); identical
 * structure, operating on EV_EVENT_Flag64 and EV_Flag64.
 */
2298 EVK_get_flag64_in(EV_ID flagID, EV_Flag64 *flag, int peek_only, int wait)
2300 EV_Event *ev = NULL;
2304 ev = evk_malloc( sizeof( EV_Event ) );
/* Allocation failure path (NULL check elided from this view). */
2307 ev_ret = EV_ERR_Fatal;
2311 EVK_assert(flag != NULL, "get_flag64: NULL pointer was specified");
2312 flag->flagID = EV_NO_ID;
2315 if (!EV_ID_IS_FLAG64(flagID)) {
2316 ev_ret = EV_ERR_Invalid_ID;
2320 ev->type = EV_EVENT_Flag64;
2321 ev->u.flag64.flagID = flagID;
2322 ev->u.flag64.bits = 0;
2324 ret = evk_get_event(ev, peek_only, wait, evk_mem_kernel);
2327 if (ret == -ENOENT) {
2328 ev_ret = EV_ERR_Invalid_ID;
2329 } else if (ret == -EINTR) {
2330 ev_ret = EV_ERR_Interrupted;
2332 ev_ret = EV_ERR_Fatal;
/* Copy out only if a flag64 event actually arrived. */
2337 if (ev->type == EV_EVENT_Flag64) {
2338 flag->flagID = ev->u.flag64.flagID;
2339 flag->bits = ev->u.flag64.bits;
2351 /* Event acquisition(With Search Criteria) */
2352 /** @see EV_get_flag */
/* Wrapper matrix over EVK_get_flag_in/EVK_get_flag64_in:
 *   get  -> (peek_only=0, wait=0)  non-blocking, consumes the flag
 *   wait -> (peek_only=0, wait=1)  blocking, consumes the flag
 *   peek -> (peek_only=1, wait=0)  non-blocking, leaves the flag set */
2354 EVK_get_flag(EV_ID flagID, /* OUT */EV_Flag *flag)
2356 return EVK_get_flag_in(flagID, flag, 0, 0);
2359 /** @see EV_get_flag64 */
2361 EVK_get_flag64(EV_ID flagID, /* OUT */EV_Flag64 *flag)
2363 return EVK_get_flag64_in(flagID, flag, 0, 0);
2366 /** @see EV_wait_flag */
2368 EVK_wait_flag(EV_ID flagID, /* OUT */EV_Flag *flag)/* block */
2370 return EVK_get_flag_in(flagID, flag, 0, 1);
2373 /** @see EV_wait_flag64 */
2375 EVK_wait_flag64(EV_ID flagID, /* OUT */EV_Flag64 *flag)/* block */
2377 return EVK_get_flag64_in(flagID, flag, 0, 1);
2380 /** @see EV_peek_flag */
2382 EVK_peek_flag(EV_ID flagID, /* OUT */EV_Flag *flag)
2384 return EVK_get_flag_in(flagID, flag, 1, 0);
2387 /** @see EV_peek_flag64 */
2389 EVK_peek_flag64(EV_ID flagID, /* OUT */EV_Flag64 *flag)
2391 return EVK_get_flag64_in(flagID, flag, 1, 0);
/*
 * EVK_get_message_in(): kernel-side message fetch shared by get/wait/peek
 * and the (unimplemented) search variants.  senderInfo and compare_bytes/
 * length are optional search criteria forwarded inside the scratch event;
 * compare_bytes == NULL means "match any message".
 */
2395 EVK_get_message_in(EV_ID queueID, EV_Message *message, UINT32 senderInfo,
2396 UINT32 length, const void *compare_bytes,
2397 int peek_only, int wait)
2399 EV_Event *ev = NULL;
2403 ev = evk_malloc( sizeof( EV_Event ) );
/* Allocation failure path (NULL check elided from this view). */
2406 ev_ret = EV_ERR_Fatal;
2410 EVK_assert(message != NULL, "get_message: NULL pointer was specified");
2411 if (!EV_ID_IS_QUEUE(queueID)) {
2412 ev_ret = EV_ERR_Invalid_ID;
/* Pre-clear the out-parameter so callers can detect "no message". */
2415 message->queueID = EV_NO_ID;
2416 message->senderInfo = 0;
2417 message->length = 0;
2419 ev->type = EV_EVENT_Message;
2420 ev->u.message.queueID = queueID;
2421 ev->u.message.senderInfo = senderInfo;
2422 if (compare_bytes != NULL) {
2423 ev->u.message.length = length;
2424 memcpy(ev->u.message.message, compare_bytes, length);
2426 ev->u.message.length = 0;
2429 ret = evk_get_event(ev, peek_only, wait, evk_mem_kernel);
2432 if (ret == -ENOENT) {
2433 ev_ret = EV_ERR_Invalid_ID;
2434 } else if (ret == -EINTR) {
2435 ev_ret = EV_ERR_Interrupted;
2437 ev_ret = EV_ERR_Fatal;
/* Copy out only if a message event actually arrived. */
2442 if (ev->type == EV_EVENT_Message) {
2443 message->queueID = ev->u.message.queueID;
2444 message->senderInfo = ev->u.message.senderInfo;
2445 message->length = ev->u.message.length;
2446 memcpy(message->message, ev->u.message.message, ev->u.message.length);
2458 /** @see EV_get_message */
/* Wrappers over EVK_get_message_in (no search criteria):
 * get = non-blocking consume, wait = blocking consume, peek = non-destructive. */
2460 EVK_get_message(EV_ID queueID, /* OUT */EV_Message *message)
2462 return EVK_get_message_in(queueID, message, 0, 0, NULL, 0, 0);
2465 /** @see EV_wait_message */
2467 EVK_wait_message(EV_ID queueID, /* OUT */EV_Message *message)/* block */
2469 return EVK_get_message_in(queueID, message, 0, 0, NULL, 0, 1);
2472 /** @see EV_peek_message */
2474 EVK_peek_message(EV_ID queueID, /* OUT */EV_Message *message)
2476 return EVK_get_message_in(queueID, message, 0, 0, NULL, 1, 0);
2479 /** @see EV_find_message_by_sender */
/* NOTE(review): the visible body unconditionally returns EV_ERR_Fatal —
 * this search API appears unimplemented in kernel space. */
2481 EVK_find_message_by_sender(EV_ID queueID, UINT32 senderInfo,
2482 /* OUT */EV_Message *message)
2485 return EV_ERR_Fatal;
2488 /** @see EV_find_message_by_content */
/* NOTE(review): likewise unimplemented — unconditionally EV_ERR_Fatal. */
2490 EVK_find_message_by_content(EV_ID queueID, UINT16 length,
2491 const void *compare_bytes,
2492 /* OUT */EV_Message *message)
2495 return EV_ERR_Fatal;
/* Symbols exported for other kernel modules that link against the EVK API. */
2499 EXPORT_SYMBOL(evk_get_queue_entry);
2500 EXPORT_SYMBOL(EVK_init);
2501 EXPORT_SYMBOL(EVK_exit);
2503 EXPORT_SYMBOL(EVK_create_flag);
2504 EXPORT_SYMBOL(EVK_create_flag64);
2505 EXPORT_SYMBOL(EVK_create_queue);
2506 EXPORT_SYMBOL(EVK_create_flag_auto_id);
2507 EXPORT_SYMBOL(EVK_create_flag64_auto_id);
2508 EXPORT_SYMBOL(EVK_create_queue_auto_id);
2509 EXPORT_SYMBOL(EVK_destroy_flag);
2510 EXPORT_SYMBOL(EVK_destroy_queue);
2511 EXPORT_SYMBOL(EVK_set_flag);
2512 EXPORT_SYMBOL(EVK_set_flag64);
2513 EXPORT_SYMBOL(EVK_send_message);
2514 EXPORT_SYMBOL(EVK_get_flag);
2515 EXPORT_SYMBOL(EVK_wait_flag);
2516 EXPORT_SYMBOL(EVK_peek_flag);
2517 EXPORT_SYMBOL(EVK_get_flag64);
2518 EXPORT_SYMBOL(EVK_wait_flag64);
2519 EXPORT_SYMBOL(EVK_peek_flag64);
2520 EXPORT_SYMBOL(EVK_get_message);
2521 EXPORT_SYMBOL(EVK_wait_message);
2522 EXPORT_SYMBOL(EVK_peek_message);
/* Standalone-module metadata; skipped when EVK is built into a combined
 * module, in which case EVK_init/EVK_exit are called by the combiner. */
2524 #ifndef CONFIG_COMBINE_MODULES
2525 //MODULE_LICENSE("proprietary");
2526 MODULE_LICENSE("GPL");
2527 MODULE_DESCRIPTION("EVent library for Kernel");
2528 //MODULE_SUPPORTED_DEVICE(name);
2529 //MODULE_PARM(var,type)
2530 //MODULE_PARM_DESC(var,desc)
2531 module_init(EVK_init);
2532 module_exit(EVK_exit);
2533 #endif /* !CONFIG_COMBINE_MODULES */