2 * drivers/agl/evk_lib.c
4 * Event library (kernel space part)
6 * @copyright Copyright (c) 2016-2020 TOYOTA MOTOR CORPORATION.
8 * This file is free software; you can redistribute it and/or modify
9 * it under the terms of version 2 of the GNU General Public License
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
22 #include <linux/module.h>
23 #include <linux/init.h>
24 #include <linux/version.h>
25 #include <linux/kernel.h>
26 #include <linux/slab.h>
27 #include <linux/sched.h>
28 #include <linux/wait.h>
29 #include <linux/poll.h>
30 #include <linux/list.h>
31 #include <asm/uaccess.h>
32 #include <linux/errno.h>
33 #include <linux/vmalloc.h>
35 #include <linux/proc_fs.h>
36 #include <linux/seq_file.h>
38 #include <linux/device.h>
39 #include <linux/cdev.h>
41 #include <linux/types.h>
42 #include <linux/ioctl.h>
44 #ifndef STANDARD_INT_DEFINITIONS
45 #define STANDARD_INT_DEFINITIONS
54 #endif /* !STANDARD_INT_DEFINITIONS */
56 #define ENABLE_PROC_FS 1
58 #define EVK_NAME "evk"
59 #define EVK_DEV_NAME "/dev/agl/"EVK_NAME
60 #define EVK_DEV_MAJOR (1033 % 256)
61 #define EVK_IOC_MAGIC 0xE7
63 #define EVK_IOC_CREATE_FLAG _IO(EVK_IOC_MAGIC, 0x00)
64 #define EVK_IOC_CREATE_FLAG64 _IO(EVK_IOC_MAGIC, 0x01)
65 #define EVK_IOC_CREATE_MESSAGE_QUEUE _IO(EVK_IOC_MAGIC, 0x02)
66 #define EVK_IOC_ALLOC_FLAG_ID _IO(EVK_IOC_MAGIC, 0x03)
67 #define EVK_IOC_ALLOC_FLAG64_ID _IO(EVK_IOC_MAGIC, 0x04)
68 #define EVK_IOC_ALLOC_QUEUE_ID _IO(EVK_IOC_MAGIC, 0x05)
69 #define EVK_IOC_DESTROY_QUEUE _IO(EVK_IOC_MAGIC, 0x06)
70 #define EVK_IOC_STORE_FLAG _IO(EVK_IOC_MAGIC, 0x07)
71 #define EVK_IOC_STORE_FLAG64 _IO(EVK_IOC_MAGIC, 0x08)
72 #define EVK_IOC_STORE_MESSAGE _IO(EVK_IOC_MAGIC, 0x09)
73 #define EVK_IOC_SET_POLL _IO(EVK_IOC_MAGIC, 0x0a)
74 #define EVK_IOC_GET_EVENT _IO(EVK_IOC_MAGIC, 0x0b)
75 #define EVK_IOC_PEEK_EVENT _IO(EVK_IOC_MAGIC, 0x0c)
76 #define EVK_IOC_WAIT_EVENT _IO(EVK_IOC_MAGIC, 0x0d)
77 #define EVK_IOC_GET_NEXT_EVENT _IO(EVK_IOC_MAGIC, 0x0e)
78 #define EVK_IOC_PEEK_NEXT_EVENT _IO(EVK_IOC_MAGIC, 0x0f)
79 #define EVK_IOC_DEBUG_LIST _IO(EVK_IOC_MAGIC, 0x10)
81 /** @brief Flag ID and queue ID types used when sending and receiving events
83 * Assign a 32-bit value as follows
85 * - Most significant 8 bits: Reserved
86 * Next 16 bits: Module ID
87 * - Lower 8 bits: Define in module
89 * Where module is the module that creates the queue.
90 * The modules define ID according to the above assignments using EV_Flag_ID_Base and EV_Queue_ID_Base macros.
91 * @see EV_Flag_ID_Base
92 * @see EV_Queue_ID_Base
96 #define EV_ID_BIT 0x80000000UL
97 #define EV_FLAG64_BIT 0x40000000UL
98 #define EV_FLAG_BIT 0x20000000UL
99 #define EV_QUEUE_BIT 0x10000000UL
100 #define EV_AUTO_ID_BIT 0x08000000UL
101 #define EV_RESERVED_BIT 0xff000000UL
102 #define EV_INVALID_ID EV_ID_BIT
103 #define EV_NO_ID EV_INVALID_ID
105 #define EV_ID_IS_FLAG(queueID) \
106 (((queueID) & (EV_ID_BIT|EV_FLAG_BIT)) == (EV_ID_BIT|EV_FLAG_BIT))
107 #define EV_ID_IS_FLAG64(queueID) \
108 (((queueID) & (EV_ID_BIT|EV_FLAG64_BIT)) == (EV_ID_BIT|EV_FLAG64_BIT))
109 #define EV_ID_IS_QUEUE(queueID) \
110 (((queueID) & (EV_ID_BIT|EV_QUEUE_BIT)) == (EV_ID_BIT|EV_QUEUE_BIT))
111 #define EV_ID_IS_AUTO_ID(queueID) \
112 (((queueID) & (EV_ID_BIT|EV_AUTO_ID_BIT)) == (EV_ID_BIT|EV_AUTO_ID_BIT))
113 #define EV_ID_IS_VALID(queueID) \
114 (EV_ID_IS_FLAG(queueID) || EV_ID_IS_FLAG64(queueID) || EV_ID_IS_QUEUE(queueID))
116 /** @brief Macros for defining flag ID
118 * Define the module ID as an argument as follows.
119 * - #define XXX_Module_ID 1
120 * - #define XXX_Flag_ID_Base EV_Flag_ID_Base(XXX_Module_ID)
122 * - #define XXX_Flag_foo (XXX_Flag_ID_Base + 1)
123 * - #define XXX_Flag_bar (XXX_Flag_ID_Base + 2)
125 * The module ID is 16 bits and 0 to 65535 can be specified.
126 * In addition, 0 to 255 added to Base can be defined as ID.
128 #define EV_Flag_ID_Base(mod) (EV_ID_BIT|EV_FLAG_BIT|((mod)<<8))
130 /** @brief Macros for defining 64 bits flag ID
132 * Define the module ID as an argument as follows.
133 * - #define XXX_Module_ID 1
134 * - #define XXX_Flag64_ID_Base EV_Flag64_ID_Base(XXX_Module_ID)
136 * - #define XXX_Flag64_foo (XXX_Flag64_ID_Base + 1)
137 * - #define XXX_Flag64_bar (XXX_Flag64_ID_Base + 2)
139 * The module ID is 16 bits and 0 to 65535 can be specified.
140 * In addition, 0 to 255 added to Base can be defined as ID.
142 #define EV_Flag64_ID_Base(mod) (EV_ID_BIT|EV_FLAG64_BIT|((mod)<<8))
144 /** @brief Macros for defining message queue ID
146 * Define the module ID as an argument as follows.
147 * - #define XXX_Module_ID 1
148 * - #define XXX_Queue_ID_Base EV_Queue_ID_Base(XXX_Module_ID)
150 * - #define XXX_Queue_foo (XXX_Queue_ID_Base + 1)
151 * - #define XXX_Queue_bar (XXX_Queue_ID_Base + 2)
153 * The module ID is 16 bits and 0 to 65535 can be specified.
154 * In addition, 0 to 255 added to Base can be defined as ID.
156 #define EV_Queue_ID_Base(mod) (EV_ID_BIT|EV_QUEUE_BIT|((mod)<<8))
158 /** @brief Maximum number of bytes for message event */
159 #define EV_MAX_MESSAGE_LENGTH 2048
161 /** @brief -Maximum number of flag queue that can be created within a thread */
162 #define EV_MAX_IDS_IN_THREAD 24
164 /** @brief -Maximum number of threads that can be registered to the EV in a process */
165 #define EV_MAX_THREADS_IN_PROCESS 16
167 /** @brief -Maximum number of flag queue that can be created within a process
169 #define EV_MAX_IDS_IN_PROCESS \
170 (EV_MAX_IDS_IN_THREAD * EV_MAX_THREADS_IN_PROCESS)
172 /** @brief Return values for event library function
178 EV_OK = 0, /**< Normal completion */
179 EV_ERR_Exist, /**< The specified flag message queue does exist */
180 EV_ERR_Invalid_ID, /**< The specified flag message queue does not exist */
181 EV_ERR_Busy, /**< Message queue full failed to send */
182 EV_ERR_Interrupted, /**< Waiting function was interrupted by an interrupt */
183 EV_ERR_Thread_Over, /**< Exceeding the number of threads in the process */
184 EV_ERR_Invalid_Thread, /**< Invalid thread ID */
185 EV_ERR_Fatal, /**< Fatal error */
187 /** @brief Return values type for event library function
191 typedef INT32 EV_ERR;
193 /** @brief Event type
195 * Use in the type of EV_Event structs
198 typedef UINT32 EV_Type;
200 /** @brief Bit value representing the type of event */
/* NOTE(review): extract fragment — the enclosing "typedef enum {" / "} ...;"
 * lines are missing from this view (original line numbers are fused into
 * each line below).
 * NOTE(review): EV_EVENT_Flag64 is 0x0003 == (EV_EVENT_Flag | EV_EVENT_Message),
 * so EV_EVENT_IS_FLAG() and EV_EVENT_IS_MESSAGE() also evaluate true for a
 * Flag64 type value; evk_get_event() appears to switch on the exact type
 * instead of using these macros — confirm the macros' intended use. */
202 EV_EVENT_None = 0x00000000,
204 /** Flag event: Judged by EV_EVENT_IS_FLAG() */
205 EV_EVENT_Flag = 0x0001,
206 #define EV_EVENT_IS_FLAG(tp) (((tp) & EV_EVENT_Flag) != 0)
208 /** Message event: Judged by EV_EVENT_IS_MESSAGE() */
209 EV_EVENT_Message = 0x0002,
210 #define EV_EVENT_IS_MESSAGE(tp) (((tp) & EV_EVENT_Message) != 0)
212 /** 64bit flag event: Judged by EV_EVENT_IS_FLAG64() */
213 EV_EVENT_Flag64 = 0x0003,
214 #define EV_EVENT_IS_FLAG64(tp) (((tp) & EV_EVENT_Flag64) != 0)
218 /** @brief Flag event structure */
220 EV_ID flagID;/**< Flag ID */
221 UINT32 bits;/**< Bit pattern */
224 /** @brief 64bit flag event structure */
226 EV_ID flagID;/**< Flag ID */
227 UINT64 bits;/**< Bit pattern */
230 /** @brief Message event structure */
232 EV_ID queueID;/**< queue ID */
233 UINT32 senderInfo;/**< Source information */
234 UINT32 length;/**< Number of bytes in the message */
235 UINT32 dummy;/** dummy for pading */
236 UINT8 message[EV_MAX_MESSAGE_LENGTH];/**< Message */
239 /** @brief Event structure */
241 EV_Type type; /**< Event type */
243 EV_Flag flag; /**< Flag event structure */
244 EV_Flag64 flag64; /**< Flag event structure */
245 EV_Message message; /**< Message event structure */
246 } u; /**< Union of structures per eventtype */
249 /** @brief Message event queue type
251 * Specify the action to be taken when the queue overflows (more events are received when the queue is full).
253 enum ev_message_queue_type {
254 EV_MESSAGE_QUEUE_TYPE_BUSY,/**< Return a BUSY to the source */
255 EV_MESSAGE_QUEUE_TYPE_FIFO,/**< Delete the oldest event */
256 EV_MESSAGE_QUEUE_TYPE_REPLACE,/**< Replace the most recent event */
259 /** @brief Message event queue type
261 * @see ev_message_queue_type
263 typedef UINT8 EV_Message_Queue_Type;
265 /** @addtogroup EV_in */
267 /** In Linux2.4, list_for_each_entry is not provided, so it is prepared by self (in 2.6)
269 #ifdef list_for_each_entry
270 #define __LINUX_26_OR_HIGHER
273 #ifndef __LINUX_26_OR_HIGHER /* linux v2.4 */
275 #define list_for_each_entry(pos, head, member) \
276 for (pos = list_entry((head)->next, typeof(*pos), member), \
277 prefetch(pos->member.next); \
278 &pos->member != (head); \
279 pos = list_entry(pos->member.next, typeof(*pos), member), \
280 prefetch(pos->member.next))
282 #define list_for_each_entry_safe(pos, n, head, member) \
283 for (pos = list_entry((head)->next, typeof(*pos), member), \
284 n = list_entry(pos->member.next, typeof(*pos), member); \
285 &pos->member != (head); \
286 pos = n, n = list_entry(n->member.next, typeof(*n), member))
288 #else /* linux v2.6 */
290 #include <linux/jiffies.h>
292 #endif /* linux v2.6 */
294 #define EVK_assert(cond, mesg) \
296 printk(KERN_ALERT "[EVK]ASSERT(pid:%d): " #cond " at %s:%d; " \
297 #mesg "\n", current->pid, __FILE__, __LINE__); \
301 #define EVK_BUG(mesg) \
302 printk(KERN_ALERT "[EVK]BUG: " mesg); \
306 #define EVK_info0(s) printk(KERN_ALERT "[EVK]INFO: " s)
307 #define EVK_info1(s, t) printk(KERN_ALERT "[EVK]INFO: " s, t)
310 #define EVK_info1(s, t)
313 static int devmajor = EVK_DEV_MAJOR;
314 static int devminor = 0;
315 static int nrdevs = 1;
316 static struct cdev cdev;
317 static struct class *pClass;
319 DEFINE_SEMAPHORE(evk_mtx);
321 static int down_line;
322 #define EVK_mutex_lock() { \
324 down_pid = current->pid; \
325 down_line = __LINE__; \
327 #define EVK_mutex_unlock() (up(&evk_mtx))
329 #ifdef EVK_USE_KMALLOC
330 #define evk_malloc(s) kmalloc((s), GFP_KERNEL)
331 #define evk_free kfree
332 #else // use vmalloc (this is the default)
333 #define evk_malloc(s) vmalloc((s))
334 #define evk_free vfree
348 #define GET_USER_OR_KERNEL(to, from) \
351 if (mem == evk_mem_user) { \
352 err = __get_user((to), &(from)); \
360 #define PUT_USER_OR_KERNEL(value, to) \
363 if (mem == evk_mem_user) { \
364 err = __put_user((value), &(to)); \
373 /** @brief Common part of the flag structure and message queue structure */
374 #define EVK_COMMON_QUEUE_ELEMS \
375 struct list_head list; /**< List structure */ \
376 wait_queue_head_t wq_head; /**< Wait_queue of a process waiting for a queue */\
377 EV_ID queueID; /**< Flag ID/Queue ID */ \
378 UINT32 seq_num; /**< Order of event arrival */ \
379 pid_t read_pid; /**< Read process ID */ \
380 pid_t pid; /**< Owning process ID */
382 /** @brief Common part of the flag structure and message queue structure */
383 struct common_queue {
384 EVK_COMMON_QUEUE_ELEMS
387 /** @brief Flag structure */
389 EVK_COMMON_QUEUE_ELEMS
390 UINT32 value;/**< Flags value */
392 #define EVK_PFLAG(queue) ((struct flag*)queue)
394 /** @brief 64-bit flag structure */
396 EVK_COMMON_QUEUE_ELEMS
397 UINT64 value;/**< Flags value */
399 #define EVK_PFLAG64(queue) ((struct flag64*)queue)
401 /** @brief Message queue structure */
402 struct message_queue {
403 EVK_COMMON_QUEUE_ELEMS
404 UINT8 type;/**< Type */
405 UINT8 length;/**< Queue length */
406 UINT8 num;/**< Number of messages stored */
407 UINT8 readptr;/**< Next read position(0~length-1) */
408 UINT32 max_bytes;/**< -Maximum bytes per message */
409 UINT8 *message;/**< Message storage area (ring buffer) */
411 #define EVK_PQUEUE(queue) ((struct message_queue*)queue)
413 /** @brief Number of bytes to allocate per message
415 * This function allocates an area to store the number of bytes actually stored, the time of occurrence,
416 * and the senderInfo, in addition to the number of bytes specified by max_bytes.
418 #define EVK_message_block_size(max_bytes) (sizeof(UINT32) * 3 + (max_bytes))
422 /** @brief Maximum number of flags used by all systems */
423 #define EVK_MAX_FLAGS 48
424 /** @brief Maximum number of 64-bit flags used by all systems */
425 #define EVK_MAX_FLAG64S 4
426 /** @brief Maximum number of message event queues used by all systems */
427 /* M1SP BM3547 MESSAGE_QUEUES 128->144 */
428 /* M9AT BM2066 MESSAGE_QUEUES 144->218 */
429 #define EVK_MAX_MESSAGE_QUEUES 224
431 /** @brief Allocate flag structure statically */
432 static struct flag _flag_pool[EVK_MAX_FLAGS];
433 /** @brief Statically allocates a 64-bit flag structure */
434 static struct flag64 _flag64_pool[EVK_MAX_FLAG64S];
435 /** @brief Beginning of the list of unused flags */
436 static LIST_HEAD(flag_pool);
437 /** @brief Beginning of the list of unused 64-bit flags */
438 static LIST_HEAD(flag64_pool);
440 /** @brief Allocate message queue structure statically */
441 static struct message_queue _message_queue_pool[EVK_MAX_MESSAGE_QUEUES];
442 /** @brief Top of the list of unused message queues */
443 static LIST_HEAD(message_queue_pool);
445 /** @brief List of Flags/Message Queues in Use
447 * Connects the usage flag/message queue to a list for each hash value obtained from the ID.
448 * The hash value is the remainder of the ID divided by HASH_KEY.
450 static struct list_head queue_entry[HASH_KEY];
452 /** @brief Sequence number to use during automatic ID assignment */
453 static EV_ID sequence_id = 0;
455 /** @brief Number to be assigned in order of occurrence of the event */
456 static UINT32 sequence_number = 0;
459 EV_ID queueID;/**< Queue ID */
460 UINT32 max_bytes;/**< Maximum number of bytes for an event */
461 UINT8 length;/**< Queue length */
462 EV_Message_Queue_Type type;/**< Type */
463 } EVK_Message_Queue_Request;
466 INT32 num; /**< Number of queue ID of search */
467 EV_ID ids[EV_MAX_IDS_IN_PROCESS]; /**< Queue ID of search */
468 EV_Event ev; /**< [OUT] First event that occured */
469 } EVK_Next_Event_Query;
/* Expose the queue_entry hash table to a caller via an out-parameter.
 * NOTE(review): extract fragment — the return-type line, braces and any
 * return value are missing from this view. */
472 evk_get_queue_entry(struct list_head **entries)
475 *entries = queue_entry;
481 static __inline__ int
482 calc_hash(UINT32 val)
484 return val % HASH_KEY;
/* Fragment of alloc_flag(): pop the first unused struct flag off flag_pool.
 * On an empty pool it logs an error (the missing lines presumably return
 * NULL — TODO confirm against the full source). The struct's embedded
 * list_head is its first member (see EVK_COMMON_QUEUE_ELEMS), hence the
 * direct cast from the pool's next pointer. */
492 //EVK_assert(!list_empty(&flag_pool), "flag pool empty");
493 if (list_empty(&flag_pool)) {
494 printk("%s ERROR: flag pool empty\n", __func__);
497 queue = (struct flag *)flag_pool.next;
498 list_del_init(&(queue->list));
/* Fragment of alloc_flag64(): same pattern as alloc_flag() but for the
 * 64-bit flag pool. Logs an error when flag64_pool is empty (error return
 * is in the missing lines — TODO confirm). */
506 struct flag64 *queue;
507 //EVK_assert(!list_empty(&flag64_pool), "flag64 pool empty");
508 if (list_empty(&flag64_pool)) {
509 printk("%s ERROR: flag64 pool empty\n", __func__);
512 queue = (struct flag64 *)flag64_pool.next;
513 list_del_init(&(queue->list));
517 static __inline__ void
518 free_flag(struct flag *queue)
520 list_add((struct list_head *)queue, &flag_pool);
523 static __inline__ void
524 free_flag64(struct flag64 *queue)
526 list_add((struct list_head *)queue, &flag64_pool);
/* Fragment of alloc_message_queue(): pop the first unused message queue
 * structure off message_queue_pool. queue->message is cleared so that
 * evk_init_message_queue()'s "buffer was not freed" assertion holds.
 * NOTE(review): error-return and closing lines are missing from this
 * extract — presumably returns NULL on an empty pool; confirm. */
530 struct message_queue *
531 alloc_message_queue(void)
533 struct message_queue *queue;
534 //EVK_assert(!list_empty(&message_queue_pool), "message queue pool empty");
535 if (list_empty(&message_queue_pool)) {
536 printk("%s ERROR: message queue pool empty\n", __func__);
539 queue = (struct message_queue *)message_queue_pool.next;
540 list_del_init(&(queue->list));
541 queue->message = NULL;
545 static __inline__ void
546 free_message_queue(struct message_queue *queue)
548 if (queue->message != NULL) {
549 evk_free(queue->message);
550 queue->message = NULL;
552 list_add((struct list_head *)queue, &message_queue_pool);
/* Fragment of find_queue_entry(): linear scan of the hash bucket selected
 * by calc_hash(queueID), matching on the exact queueID. The matching
 * return and the not-found return are in the missing lines (presumably
 * "return queue;" / "return NULL;" — TODO confirm). Caller must hold
 * evk_mtx since the bucket lists are mutated elsewhere under it. */
556 struct common_queue *
557 find_queue_entry(EV_ID queueID)
559 struct list_head *list;
560 struct common_queue *queue;
561 int hash = calc_hash(queueID);
562 list = &(queue_entry[hash]);
563 list_for_each_entry(queue, list, list) { /* pgr0060 */ /* pgr0039 */
564 if (queue->queueID == queueID) {
571 static __inline__ void
572 attach_queue_entry(struct common_queue *queue, EV_ID queueID)
574 int hash = calc_hash(queueID);
575 list_add_tail((struct list_head *)queue, &(queue_entry[hash]));
/** @brief Remove a flag/queue from the in-use hash table.
 *
 * list_del_init (rather than list_del) leaves the node self-linked so it
 * can be safely re-added to a free pool afterwards. Callers hold evk_mtx.
 *
 * Reconstructed from a garbled extract: fused line numbers stripped and
 * missing brace lines restored; visible tokens unchanged.
 */
static __inline__ void
detach_queue_entry(struct common_queue *queue)
{
	list_del_init((struct list_head *)queue);
}
/* Initialize the members shared by flags and message queues: record the ID,
 * the owning process (current->pid), and set up the reader wait queue.
 * NOTE(review): extract fragment — braces and original line 589 are missing;
 * 589 is likely another field reset (e.g. seq_num or read_pid) — confirm
 * against the full source. */
584 static __inline__ void
585 init_common_queue(struct common_queue *queue, EV_ID queueID)
587 queue->queueID = queueID;
588 queue->pid = current->pid;
590 init_waitqueue_head(&(queue->wq_head));
/* Initialize a freshly allocated flag. NOTE(review): the missing trailing
 * lines presumably clear queue->value to 0 — confirm against the full
 * source. */
593 static __inline__ void
594 evk_init_flag(struct flag *queue, EV_ID queueID)
596 init_common_queue((struct common_queue *)queue, queueID);
/* Initialize a freshly allocated 64-bit flag. NOTE(review): the missing
 * trailing lines presumably clear queue->value to 0 — confirm against the
 * full source. */
600 static __inline__ void
601 evk_init_flag64(struct flag64 *queue, EV_ID queueID)
603 init_common_queue((struct common_queue *)queue, queueID);
/* Initialize a message queue: common header, geometry, and the ring buffer
 * (length slots of EVK_message_block_size(max_bytes) bytes each, i.e.
 * 3 UINT32 header words + payload per slot). Asserts that the previous
 * buffer was freed and that allocation succeeds.
 * NOTE(review): extract fragment — the line storing `type` into the queue
 * (likely original line 612) and num/readptr resets are missing; the int
 * return value's success/failure codes are also not visible — confirm. */
607 static __inline__ int
608 evk_init_message_queue(struct message_queue *queue, EV_ID queueID,
609 UINT8 length, UINT32 max_bytes, UINT8 type)
611 init_common_queue((struct common_queue *)queue, queueID);
613 queue->length = length;
614 queue->max_bytes = max_bytes;
617 EVK_assert(queue->message == NULL, "message buffer was not freed");
618 queue->message = evk_malloc(length * EVK_message_block_size(max_bytes));
619 EVK_assert(queue->message != NULL, "can't alloc message buffer");
/* Fragment of a debug dump routine (name not visible in this extract;
 * presumably backs EVK_IOC_DEBUG_LIST): walks every queue_entry[] hash
 * bucket and prints "bucket-> id[seq_num]" for each in-use flag/queue. */
626 struct list_head *list;
627 struct common_queue *queue;
629 for(i = 0 ; i < HASH_KEY ; i++) {
630 list = &(queue_entry[i]);
631 if (!list_empty(list)) {
632 printk(KERN_ALERT "%d->", i);
633 list_for_each_entry(queue, list, list) { /* pgr0060 */ /* pgr0039 */
634 printk("%x[%x] ", queue->queueID, queue->seq_num);
/* Destroy a flag / 64-bit flag / message queue identified by queueID:
 * under evk_mtx, unhook it from the hash table, wake any sleepers on its
 * wait queue before the structure is recycled, reset the common header to
 * EV_INVALID_ID, and return the structure to the matching free pool based
 * on the ID's type bits.
 * NOTE(review): extract fragment — the not-found error path after
 * find_queue_entry (original lines 650-654) and the return value are
 * missing from this view. */
642 evk_destroy_queue(EV_ID queueID)
644 struct common_queue *queue;
646 EVK_info1("flag destroy %x\n", queueID);
648 EVK_mutex_lock(); /*************************************/
649 queue = find_queue_entry(queueID);
655 detach_queue_entry(queue);
657 /* wake up processes before destruction */
658 wake_up_interruptible(&(queue->wq_head));
660 init_common_queue(queue, EV_INVALID_ID);
662 if (EV_ID_IS_FLAG(queueID)) {
663 free_flag((struct flag *)queue);
664 } else if (EV_ID_IS_FLAG64(queueID)) {
665 free_flag64((struct flag64 *)queue);
666 } else if (EV_ID_IS_QUEUE(queueID)) {
667 free_message_queue((struct message_queue *)queue);
671 EVK_mutex_unlock(); /*************************************/
/* Character-device open handler: mark this file as owning no queue yet by
 * stashing EV_INVALID_ID in private_data (later set by EVK_IOC_SET_POLL). */
676 evk_open(struct inode *inode, struct file *file)
678 // Recording of current and measures not to be read or deleted from others are required. */
679 file->private_data = (void *)EV_INVALID_ID;
/* Character-device release handler: if this file was associated with a
 * valid queue ID (via set_poll), destroy that queue so resources are not
 * leaked when the owning process exits, then reset private_data. */
684 evk_close(struct inode *inode, struct file *file)
686 if (EV_ID_IS_VALID((EV_ID)file->private_data)) {
687 evk_destroy_queue((EV_ID)file->private_data);
689 file->private_data = (void *)EV_INVALID_ID;
/* Create a flag with the given ID: under evk_mtx, check for an existing
 * entry (duplicate-ID error path is in missing lines — presumably returns
 * an "exists" error), allocate from the pool, initialize, and publish in
 * the hash table.
 * NOTE(review): the allocation-failure branch after alloc_flag() and the
 * return value are also in missing lines — confirm. */
694 evk_create_flag(EV_ID queueID)
698 EVK_info1("flag create %x\n", queueID);
700 EVK_mutex_lock(); /*************************************/
702 queue = (struct flag *)find_queue_entry(queueID);
708 queue = alloc_flag();
714 evk_init_flag(queue, queueID);
715 attach_queue_entry((struct common_queue *)queue, queueID);
719 EVK_mutex_unlock(); /***********************************/
/* Create a 64-bit flag with the given ID — mirror of evk_create_flag()
 * using the flag64 pool. Duplicate-ID and allocation-failure branches are
 * in missing lines — confirm against the full source. */
724 evk_create_flag64(EV_ID queueID)
726 struct flag64 *queue;
728 EVK_info1("flag64 create %x\n", queueID);
730 EVK_mutex_lock(); /*************************************/
732 queue = (struct flag64 *)find_queue_entry(queueID);
738 queue = alloc_flag64();
744 evk_init_flag64(queue, queueID);
745 attach_queue_entry((struct common_queue *)queue, queueID);
749 EVK_mutex_unlock(); /***********************************/
/* Create a message queue: under evk_mtx, reject duplicates (branch in
 * missing lines), allocate a structure, initialize it (which allocates the
 * ring buffer); on init success publish it in the hash table, otherwise
 * return the structure to the pool via free_message_queue().
 * NOTE(review): the err test between lines 777 and 779/781 is in missing
 * lines — the visible attach/free pair implies an if/else on err. */
754 evk_create_message_queue(EV_ID queueID, UINT8 length,
755 UINT32 max_bytes, EV_Message_Queue_Type type)
757 struct message_queue *queue;
759 EVK_info1("message create %x\n", queueID);
763 EVK_mutex_lock(); /*************************************/
765 queue = (struct message_queue *)find_queue_entry(queueID);
771 queue = alloc_message_queue();
777 err = evk_init_message_queue(queue, queueID, length, max_bytes, type);
779 attach_queue_entry((struct common_queue *)queue, queueID);
781 free_message_queue(queue);
786 EVK_mutex_unlock(); /***********************************/
/* Fragment of get_seq_id(): when the incrementing sequence_id grows into
 * the reserved top byte (EV_RESERVED_BIT) it is wrapped ("rounded") back
 * to the start; the wrap assignment itself is in missing lines. */
795 if ((sequence_id & EV_RESERVED_BIT) != 0) {/* round to 1 */
797 EVK_info0("auto ID rounded\n");
/* Allocate an automatic flag ID: under evk_mtx, keep drawing sequence IDs
 * and tagging them with the flag + auto-ID bits until one is not already
 * in use. The do-loop opener, the *queueID out-assignment and the return
 * are in missing lines. */
804 evk_alloc_flagID(EV_ID *queueID)
808 EVK_mutex_lock(); /*************************************/
810 seq_id = get_seq_id();
811 seq_id |= (EV_ID_BIT | EV_FLAG_BIT | EV_AUTO_ID_BIT);
812 } while(find_queue_entry(seq_id) != NULL);
813 EVK_mutex_unlock(); /*************************************/
/* Allocate an automatic 64-bit flag ID — mirror of evk_alloc_flagID()
 * with EV_FLAG64_BIT. Do-loop opener, out-assignment and return are in
 * missing lines. */
820 evk_alloc_flag64ID(EV_ID *queueID)
824 EVK_mutex_lock(); /*************************************/
826 seq_id = get_seq_id();
827 seq_id |= (EV_ID_BIT | EV_FLAG64_BIT | EV_AUTO_ID_BIT);
828 } while(find_queue_entry(seq_id) != NULL);
829 EVK_mutex_unlock(); /*************************************/
/* Allocate an automatic message-queue ID — mirror of evk_alloc_flagID()
 * with EV_QUEUE_BIT. Do-loop opener, out-assignment and return are in
 * missing lines. */
836 evk_alloc_queueID(EV_ID *queueID)
840 EVK_mutex_lock(); /*************************************/
842 seq_id = get_seq_id();
843 seq_id |= (EV_ID_BIT | EV_QUEUE_BIT | EV_AUTO_ID_BIT);
844 } while(find_queue_entry(seq_id) != NULL);
845 EVK_mutex_unlock(); /*************************************/
/* Set (OR) bits on a flag. `mem` selects user vs kernel source memory for
 * reading ev->flagID / ev->bits (GET_USER_OR_KERNEL). Under evk_mtx:
 * look up the flag; when it was previously clear, stamp seq_num from the
 * global sequence_number so waiters can be served oldest-first; OR in the
 * bits and wake readers sleeping on the flag's wait queue.
 * NOTE(review): error-return paths (copy failure, unknown ID) and the
 * final return are in missing lines. */
852 evk_store_flag(EV_Flag *ev, int mem)
859 if (GET_USER_OR_KERNEL(flagID, ev->flagID)) /* pgr0039 */
861 if (GET_USER_OR_KERNEL(bits, ev->bits)) /* pgr0039 */
864 EVK_mutex_lock(); /*************************************/
866 queue = (struct flag *)find_queue_entry(flagID); /* pgr0000 */
868 EVK_info1("set_flag: No such ID %x\n", flagID);
873 if (queue->value == 0) {
874 queue->seq_num = sequence_number++;
876 queue->value |= bits; /* pgr0000 */
878 wake_up_interruptible(&(queue->wq_head));
881 EVK_mutex_unlock(); /***********************************/
/* 64-bit variant of evk_store_flag(). The 64-bit `bits` value cannot go
 * through the word-sized GET_USER_OR_KERNEL macro (see the commented-out
 * line), so it is copied with copy_from_user() for user memory; the
 * kernel-memory branch (direct assignment) is in missing lines. The rest
 * mirrors evk_store_flag(): stamp seq_num on first set, OR bits, wake
 * readers. Error paths and return are in missing lines. */
886 evk_store_flag64(EV_Flag64 *ev, int mem)
888 struct flag64 *queue;
893 if (GET_USER_OR_KERNEL(flagID, ev->flagID)) /* pgr0039 */
895 //GET_USER_OR_KERNEL(bits, ev->bits); /* pgr0039 */
896 if (mem == evk_mem_user) {
897 if (copy_from_user(&bits, &(ev->bits), sizeof(bits)))
903 EVK_mutex_lock(); /*************************************/
905 queue = (struct flag64 *)find_queue_entry(flagID); /* pgr0000 */
907 EVK_info1("set_flag64: No such ID %x\n", flagID);
912 if (queue->value == 0) {
913 queue->seq_num = sequence_number++;
915 queue->value |= bits; /* pgr0000 */
917 wake_up_interruptible(&(queue->wq_head));
920 EVK_mutex_unlock(); /***********************************/
/* Enqueue a message event. Reads queueID/length/senderInfo from user or
 * kernel memory per `mem`, then under evk_mtx:
 *  - rejects unknown queue IDs and messages longer than max_bytes;
 *  - on a full queue, behaves per queue->type: BUSY rejects the sender,
 *    FIFO drops the oldest (advance readptr), REPLACE overwrites the most
 *    recent (exact adjustments partly in missing lines);
 *  - writes one ring-buffer slot: [length][senderInfo][seq] header words
 *    followed by the payload (copy_from_user for user memory);
 *  - when the queue was empty, stamps queue->seq_num with this event's seq
 *    so multi-queue waiters can order queues by arrival;
 *  - wakes readers sleeping on the queue.
 * NOTE(review): error returns, several switch-case bodies and the final
 * return are in missing lines — confirm against the full source. */
925 evk_store_message(EV_Message *ev, int mem)
927 struct message_queue *queue;
932 UINT32 length, senderInfo, seq;
934 if (GET_USER_OR_KERNEL(queueID, ev->queueID)) /* pgr0039 */
936 if (GET_USER_OR_KERNEL(length, ev->length)) /* pgr0039 */
938 if (GET_USER_OR_KERNEL(senderInfo, ev->senderInfo)) /* pgr0039 */
941 EVK_mutex_lock(); /*************************************/
943 queue = (struct message_queue *)find_queue_entry(queueID); /* pgr0000 */
945 EVK_info1("store_message: No such queueID %x\n", queueID);
950 if (length > queue->max_bytes) { /* pgr0000 */
951 EVK_info0("store_message: message is too long for the queue");
/* Queue full: resolve per the queue's overflow policy. */
956 if (queue->num == queue->length) {
958 switch(queue->type) {
959 case EV_MESSAGE_QUEUE_TYPE_BUSY:
960 EVK_info1("store_message: queue %x BUSY\n", queueID);
965 case EV_MESSAGE_QUEUE_TYPE_FIFO:
967 queue->readptr %= queue->length;
971 case EV_MESSAGE_QUEUE_TYPE_REPLACE:
976 EVK_BUG("internal error in store_message\n");
/* Compute the write slot and lay down the 3-word header + payload. */
983 writeptr = (queue->readptr + queue->num) % queue->length;
984 ptr = queue->message + writeptr * EVK_message_block_size(queue->max_bytes);
986 memcpy(ptr, &length, sizeof(length));
987 ptr += sizeof(length);
988 memcpy(ptr, &senderInfo, sizeof(senderInfo));
989 ptr += sizeof(senderInfo);
990 seq = sequence_number++;
991 memcpy(ptr, &seq, sizeof(seq));
994 if (queue->num == 0) {
995 queue->seq_num = seq;
999 if (mem == evk_mem_user) {
1000 if (copy_from_user(ptr, ev->message, length)) {
1005 memcpy(ptr, ev->message, length);
1008 wake_up_interruptible(&(queue->wq_head));
1011 EVK_mutex_unlock(); /***********************************/
/* Associate an open file with a queue ID for poll()/close() handling:
 * under evk_mtx, verify the ID exists, then record it in
 * filp->private_data (read back by evk_close, which destroys the queue on
 * release). Error path after the NULL check and the return are in missing
 * lines. */
1017 evk_set_poll(struct file *filp, EV_ID queueID)
1019 struct common_queue *queue;
1022 EVK_mutex_lock(); /*************************************/
1024 queue = find_queue_entry(queueID);
1025 if (queue == NULL) {
1026 EVK_info1("set_poll: ID %x not found.\n", queueID);
1031 filp->private_data = (void *)queueID;
1034 EVK_mutex_unlock(); /*************************************/
/* Fetch a flag event. Called with evk_mtx held (see evk_get_event).
 * Reads the requested flagID and bit mask from ev (user or kernel memory
 * per `mem`). A flag "matches" when the caller's mask is 0 (any bit) or
 * overlaps the flag's current value; on match the event type and the full
 * value are written back and read_pid is recorded. `peek_only` leaves the
 * value intact (the clear on non-peek reads is in missing lines —
 * confirm). With `wait` set and no event pending, the mutex is dropped and
 * the caller sleeps interruptibly until the flag is set or destroyed;
 * interruption and flag-destroyed cases are reported separately.
 * NOTE(review): error returns and the found/cleanup logic between the
 * visible lines are missing from this extract. */
1039 evk_get_flag_event(EV_Event *ev, int peek_only, int wait, int mem)
1041 struct flag *queue, *queue2;
1046 if (GET_USER_OR_KERNEL(flagID, ev->u.flag.flagID)) /* pgr0039 */
1051 queue = (struct flag *)find_queue_entry(flagID); /* pgr0000 */
1052 if (queue == NULL) {
1053 EVK_info1("get_flag: No such flag %x\n", flagID);
1058 if (queue->value != 0) {
1061 if (GET_USER_OR_KERNEL(bits, ev->u.flag.bits)) { /* pgr0039 */
1066 if (bits == 0 || ((bits & queue->value) != 0)) { /* pgr0000 */
1068 if (PUT_USER_OR_KERNEL(EV_EVENT_Flag, ev->type)) { /* pgr0039 */
1072 if (PUT_USER_OR_KERNEL(queue->value, ev->u.flag.bits)) { /* pgr0039 */
1078 queue->read_pid = current->pid;
1088 if (queue != NULL && wait != 0 && found == 0) {
1090 EVK_mutex_unlock(); /*************************************/
1093 = wait_event_interruptible(queue->wq_head,
1094 ((queue2 = (struct flag *)find_queue_entry(flagID)) == NULL
1095 || queue2->value != 0));
1097 EVK_mutex_lock(); /*************************************/
1099 if (wait_ret != 0) {
1100 EVK_info1("Interrupted while waiting for flag %x\n", flagID);
1102 } else if (queue2 == NULL) { /* pgr0039 */
1103 EVK_info1("flag %x was destroyed while waiting for it\n", flagID);
/* 64-bit variant of evk_get_flag_event(); called with evk_mtx held.
 * The 64-bit mask cannot use the word-sized GET_USER_OR_KERNEL macro
 * (commented out below), so user memory goes through copy_from_user and
 * kernel memory is read directly. Match/wake/wait logic mirrors the 32-bit
 * version. NOTE(review): error returns and found/cleanup logic between the
 * visible lines are missing from this extract. */
1113 evk_get_flag64_event(EV_Event *ev, int peek_only, int wait, int mem)
1115 struct flag64 *queue, *queue2;
1120 if (GET_USER_OR_KERNEL(flagID, ev->u.flag64.flagID)) /* pgr0039 */
1125 queue = (struct flag64 *)find_queue_entry(flagID); /* pgr0000 */
1126 if (queue == NULL) {
1127 EVK_info1("get_flag64: No such flag %x\n", flagID);
1132 if (queue->value != 0) {
1135 //GET_USER_OR_KERNEL(bits, ev->u.flag64.bits); /* pgr0039 */
1136 if (mem == evk_mem_user) {
1137 if (copy_from_user(&bits, &(ev->u.flag64.bits), sizeof(bits))) {
1142 bits = ev->u.flag64.bits;
1145 if (bits == 0 || ((bits & queue->value) != 0)) { /* pgr0000 */
1147 if (PUT_USER_OR_KERNEL(EV_EVENT_Flag64, ev->type)) { /* pgr0039 */
1151 if (PUT_USER_OR_KERNEL(queue->value, ev->u.flag64.bits)) { /* pgr0039 */
1157 queue->read_pid = current->pid;
1167 if (queue != NULL && wait != 0 && found == 0) {
1169 EVK_mutex_unlock(); /*************************************/
1172 = wait_event_interruptible(queue->wq_head,
1173 ((queue2 = (struct flag64 *)find_queue_entry(flagID)) == NULL
1174 || queue2->value != 0));
1176 EVK_mutex_lock(); /*************************************/
1178 if (wait_ret != 0) {
1179 EVK_info1("Interrupted while waiting for flag %x\n", flagID);
1181 } else if (queue2 == NULL) { /* pgr0039 */
1182 EVK_info1("flag %x was destroyed while waiting for it\n", flagID);
/* Remove the message at ring slot `removeptr` from the queue. Three cases
 * by the slot's offset from readptr:
 *  - offset 0 (head): advance readptr (the increment before the visible
 *    `%= length` and the num decrement are in missing lines) and re-stamp
 *    queue->seq_num from the new head's third header word (seq);
 *  - offset == num (tail): nothing to move;
 *  - middle: shift every later slot down by one with block-sized memcpy
 *    over the ring.
 * NOTE(review): offset may be computed negative and wrapped by
 * `offset += queue->length` — the conditional guarding that wrap is in a
 * missing line. Caller holds evk_mtx. */
1191 static __inline__ void
1192 remove_message_event(struct message_queue *queue, UINT8 removeptr)
1201 offset = (int)removeptr - (int)(queue->readptr);
1203 if (offset == 0) {/* To remove the head of the queue, advance the queue by one readptr only */
1205 queue->readptr %= queue->length;
1207 if (queue->num > 0) {
1208 /* Reset the occurrence time of the first message in the queue to the occurrence time of the queue. */
1209 ptr = (queue->message
1210 + queue->readptr * EVK_message_block_size(queue->max_bytes));
1211 ptr += sizeof(UINT32) * 2;
1212 memcpy(&(queue->seq_num), ptr, sizeof(UINT32));
1217 offset += queue->length;
1219 if (offset == queue->num) {/* Do nothing to delete the end of the queue */
1223 /* To delete a message in the middle of the queue, pack the following messages. */
1225 size = EVK_message_block_size(queue->max_bytes);
1227 for(i = 0 ; i < queue->num - offset ; i++, to++) {
1228 to %= queue->length;
1229 from = (to + 1) % queue->length;
1230 pFrom = queue->message + from * size;
1231 pTo = queue->message + to * size;
1232 memcpy(pTo, pFrom, size);
/* Fetch a message event; called with evk_mtx held (see evk_get_event).
 * Scans the queue's stored messages in FIFO order; a message "matches"
 * the caller's filter in ev when:
 *  - senderInfo filter and length filter are both 0 (accept anything), or
 *  - the non-zero senderInfo filter equals the message's senderInfo, or
 *  - length > 0 and the message is at least that long and its first
 *    `length` bytes equal the caller-supplied prefix (compared via a
 *    temporary kernel copy for user memory).
 * On match: write back type/length/senderInfo and the payload (copy_to_user
 * for user memory), record read_pid, and unless peek_only, remove the
 * message from the ring. With `wait` set and nothing matched, drop the
 * mutex and sleep until the queue gains a message or is destroyed.
 * NOTE(review): many error-return branches, the `matched`/`num` setup and
 * the final return are in missing lines — confirm against the full
 * source. */
1237 evk_get_message_event(EV_Event *ev, int peek_only, int wait, int mem)
1239 struct message_queue *queue, *queue2;
1247 if (GET_USER_OR_KERNEL(queueID, ev->u.message.queueID)) /* pgr0039 */
1251 queue = (struct message_queue *)find_queue_entry(queueID); /* pgr0000 */
1252 if (queue == NULL) {
1253 EVK_info1("get_message: No such queue %x\n", queueID);
1259 readptr = queue->readptr;
1260 for(i = 0 ; i < num ; i++, readptr = (readptr + 1) % queue->length) {
1261 UINT32 size, senderInfo, seq;
1262 UINT32 length, q_senderInfo;
/* Decode the slot's 3-word header: stored size, senderInfo, seq. */
1264 ptr = (queue->message
1265 + readptr * EVK_message_block_size(queue->max_bytes));
1267 memcpy(&size, ptr, sizeof(size));
1268 ptr += sizeof(size);
1269 memcpy(&senderInfo, ptr, sizeof(senderInfo));
1270 ptr += sizeof(senderInfo);
1271 memcpy(&seq, ptr, sizeof(seq));
1274 if (GET_USER_OR_KERNEL(length, ev->u.message.length)) { /* pgr0039 */
1278 if (GET_USER_OR_KERNEL(q_senderInfo, ev->u.message.senderInfo)) { /* pgr0039 */
1283 if (q_senderInfo == 0 && length == 0) { /* pgr0000 */
1285 } else if (q_senderInfo != 0 && q_senderInfo == senderInfo) {
1287 } else if (length > 0 && size >= length) { /* pgr0000 */
1289 if (mem == evk_mem_user) {
1291 compbytes = evk_malloc(length);
1292 if (compbytes != NULL) {
1293 if (copy_from_user(compbytes, &(ev->u.message.message), length)) {
1295 evk_free(compbytes);
1298 if (memcmp(ptr, compbytes, length) == 0) {
1301 evk_free(compbytes);
1304 if (memcmp(ptr, ev->u.message.message, length) == 0) {
1312 if (PUT_USER_OR_KERNEL(EV_EVENT_Message, ev->type)) { /* pgr0039 */
1316 if (PUT_USER_OR_KERNEL(size, ev->u.message.length)) { /* pgr0039 */
1320 if (PUT_USER_OR_KERNEL(senderInfo, ev->u.message.senderInfo)) { /* pgr0039 */
1324 if (mem == evk_mem_user) {
1325 if (copy_to_user(ev->u.message.message, ptr, size)) {
1330 memcpy(ev->u.message.message, ptr, size);
1333 queue->read_pid = current->pid;
1337 remove_message_event(queue, readptr);
1345 if (queue != NULL && wait != 0 && matched == 0) {
1347 EVK_mutex_unlock(); /*************************************/
1349 = wait_event_interruptible(queue->wq_head,
1350 ((queue2 = (struct message_queue *)find_queue_entry(queueID))==NULL
1351 || queue2->num > 0));
1353 EVK_mutex_lock(); /*************************************/
1355 if (wait_ret != 0) {
1356 EVK_info1("Interrupted while waiting for queue %x\n", queueID);
1358 } else if (queue2 == NULL) { /* pgr0039 */
1359 EVK_info1("queue %x was destroyed while waiting for it\n", queueID);
/* Dispatch a get/peek/wait request to the flag, flag64 or message handler
 * based on ev->type. ev->type is first reset to EV_EVENT_None so the
 * caller can tell whether an event was delivered; each case takes and
 * releases evk_mtx around its handler. The EV_EVENT_Flag case label and
 * break/default lines are in missing lines of this extract. */
1370 evk_get_event(EV_Event *ev, int peek_only, int wait, int mem)
1375 if (GET_USER_OR_KERNEL(type, ev->type)) /* pgr0039 */
1377 if (PUT_USER_OR_KERNEL(EV_EVENT_None, ev->type)) /* pgr0039 */
1380 switch(type) { /* pgr0000 */
1382 EVK_mutex_lock(); /*************************************/
1383 ret = evk_get_flag_event(ev, peek_only, wait, mem);
1384 EVK_mutex_unlock(); /*************************************/
1387 case EV_EVENT_Flag64:
1388 EVK_mutex_lock(); /*************************************/
1389 ret = evk_get_flag64_event(ev, peek_only, wait, mem);
1390 EVK_mutex_unlock(); /*************************************/
1393 case EV_EVENT_Message:
1394 EVK_mutex_lock(); /*************************************/
1395 ret = evk_get_message_event(ev, peek_only, wait, mem);
1396 EVK_mutex_unlock(); /*************************************/
/* Serve EVK_IOC_GET_NEXT_EVENT / PEEK_NEXT_EVENT: of the caller-supplied
 * list of queue IDs (copied from user space into a kmalloc'd array), find
 * the queue holding the OLDEST pending event — comparing seq_num values
 * with time_before() so sequence-counter wraparound is handled — then
 * fetch that one event via the matching evk_get_*_event() helper with
 * wait=0 and evk_mem_user.
 * NOTE(review): the kfree() of `ids`, the bound check of `num` against
 * EV_MAX_IDS_IN_PROCESS before copy_from_user, the `first`/`found`
 * bookkeeping and all error returns are in missing lines — verify in the
 * full source that `num` is validated and `ids` is freed on every path. */
1406 evk_get_next_event(EVK_Next_Event_Query *query /* user */, int peek_only)
1409 int i, num, ret, first, found;
1410 struct common_queue *queue;
1411 UINT32 seq_oldest = 0;
1413 ids = (EV_ID *)kmalloc( (sizeof(EV_ID)*EV_MAX_IDS_IN_PROCESS), GFP_KERNEL );
1418 if (__get_user(num, &(query->num))) { /* pgr0039 */
1422 if (copy_from_user(&ids[0], query->ids, num * sizeof(EV_ID))) { /* pgr0039 */
1426 if (__put_user(EV_EVENT_None, &(query->ev.type))) { /* pgr0039 */
1435 EVK_mutex_lock(); /*************************************/
1437 for(i = 0 ; i < num /* pgr0039 */ ; i++) {
1438 queue = find_queue_entry(ids[i]);
1439 if (queue != NULL) {/* Have the specified queue ID */
1440 if ((EV_ID_IS_FLAG(ids[i])
1441 && ((struct flag *)queue)->value != 0)
1442 || (EV_ID_IS_FLAG64(ids[i])
1443 && ((struct flag64 *)queue)->value != 0)
1444 || (EV_ID_IS_QUEUE(ids[i])
1445 && ((struct message_queue *)queue)->num > 0)) {/*There are events.*/
1446 /* Compare with time_before macros for round 0 */
1447 if (first || time_before((unsigned long)queue->seq_num, /* pgr0006 */ /* pgr0039 */
1448 (unsigned long)seq_oldest)) {
1450 seq_oldest = queue->seq_num;
1458 if (EV_ID_IS_FLAG(ids[found])) {
1459 if (__put_user(ids[found], &(query->ev.u.flag.flagID))) { /* pgr0039 */
1463 ret = evk_get_flag_event(&(query->ev), peek_only, 0, evk_mem_user);
1464 } else if (EV_ID_IS_FLAG64(ids[found])) {
1465 if (__put_user(ids[found], &(query->ev.u.flag64.flagID))) { /* pgr0039 */
1469 ret = evk_get_flag64_event(&(query->ev), peek_only, 0, evk_mem_user);
1470 } else if (EV_ID_IS_QUEUE(ids[found])) {
1471 if (__put_user(ids[found], &(query->ev.u.message.queueID))) { /* pgr0039 */
1475 ret = evk_get_message_event(&(query->ev), peek_only, 0, evk_mem_user);
1480 EVK_mutex_unlock(); /*************************************/
/*
 * unlocked_ioctl handler: decode the EVK_IOC_* command and dispatch to the
 * corresponding evk_* primitive.  `arg` is either an EV_ID passed by value
 * or a user-space pointer, depending on the command.
 * Each user-pointer command validates the range with access_ok(); the
 * two-argument form is used on kernels >= 5.0 where the `type` parameter
 * (VERIFY_READ/VERIFY_WRITE) was removed.
 */
1487 evk_ioctl(struct file *filp, unsigned int cmd,
1490 EVK_Message_Queue_Request mesq;
1491 int peek_only, wait;
1498 case EVK_IOC_CREATE_FLAG:
1499 queueID = (EV_ID)arg;
1500 ret = evk_create_flag(queueID);
1503 case EVK_IOC_CREATE_FLAG64:
1504 queueID = (EV_ID)arg;
1505 ret = evk_create_flag64(queueID);
1508 case EVK_IOC_CREATE_MESSAGE_QUEUE:
1509 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1510 if (access_ok(arg, sizeof(mesq))) { /* pgr0039 */
1512 if (access_ok(VERIFY_READ, arg, sizeof(mesq))) { /* pgr0039 */
/* NOTE(review): cast should be (EVK_Message_Queue_Request *) — the
 * (EV_Flag *) cast is misleading, though harmless since sizeof(mesq)
 * governs the copy length. */
1514 if (copy_from_user(&mesq, (EV_Flag *)arg, sizeof(mesq))) {
1519 ret = evk_create_message_queue(mesq.queueID, mesq.length,
1520 mesq.max_bytes, mesq.type);
1526 case EVK_IOC_ALLOC_FLAG_ID:
1527 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1528 if (access_ok(arg, sizeof(queueID))) { /* pgr0039 */
1530 if (access_ok(VERIFY_WRITE, arg, sizeof(queueID))) { /* pgr0039 */
1532 evk_alloc_flagID(&queueID);
1533 if (put_user(queueID, (EV_ID *)arg)) { /* pgr0039 */
1543 case EVK_IOC_ALLOC_FLAG64_ID:
1544 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1545 if (access_ok(arg, sizeof(queueID))) { /* pgr0039 */
1547 if (access_ok(VERIFY_WRITE, arg, sizeof(queueID))) { /* pgr0039 */
1549 evk_alloc_flag64ID(&queueID);
1550 if (put_user(queueID, (EV_ID *)arg)) { /* pgr0039 */
1560 case EVK_IOC_ALLOC_QUEUE_ID:
1561 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1562 if (access_ok(arg, sizeof(queueID))) { /* pgr0039 */
1564 if (access_ok(VERIFY_WRITE, arg, sizeof(queueID))) { /* pgr0039 */
1566 evk_alloc_queueID(&queueID);
1567 if (put_user(queueID, (EV_ID *)arg)) { /* pgr0039 */
1577 case EVK_IOC_DESTROY_QUEUE:
1578 queueID = (EV_ID)arg;
1579 ret = evk_destroy_queue(queueID);
/* Store commands pass the raw user pointer down; the evk_store_* helpers
 * handle the user-space copy themselves (evk_mem_user). */
1582 case EVK_IOC_STORE_FLAG:
1583 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1584 if (access_ok(arg, sizeof(EV_Flag))) { /* pgr0039 */
1586 if (access_ok(VERIFY_READ, arg, sizeof(EV_Flag))) { /* pgr0039 */
1588 ret = evk_store_flag((EV_Flag *)arg, evk_mem_user);
1594 case EVK_IOC_STORE_FLAG64:
1595 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1596 if (access_ok(arg, sizeof(EV_Flag64))) { /* pgr0039 */
1598 if (access_ok(VERIFY_READ, arg, sizeof(EV_Flag64))) { /* pgr0039 */
1600 ret = evk_store_flag64((EV_Flag64 *)arg, evk_mem_user);
1606 case EVK_IOC_STORE_MESSAGE:
1607 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1608 if (access_ok(arg, sizeof(EV_Message))) { /* pgr0039 */
1610 if (access_ok(VERIFY_READ, arg, sizeof(EV_Message))) { /* pgr0039 */
1612 ret = evk_store_message((EV_Message *)arg, evk_mem_user);
1618 case EVK_IOC_SET_POLL:
1619 queueID = (EV_ID)arg;
1620 ret = evk_set_poll(filp, queueID);
/* PEEK/WAIT/GET share one handler; peek_only/wait are set per command
 * in lines elided from this extract. */
1623 case EVK_IOC_PEEK_EVENT:
1629 case EVK_IOC_WAIT_EVENT:
1635 case EVK_IOC_GET_EVENT:
1639 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1640 if (access_ok(arg, sizeof(EV_Event))) { /* pgr0039 */
1642 if (access_ok(VERIFY_WRITE, arg, sizeof(EV_Event))) { /* pgr0039 */
1644 ret = evk_get_event((EV_Event *)arg, peek_only, wait, evk_mem_user);
1650 case EVK_IOC_PEEK_NEXT_EVENT:
1655 case EVK_IOC_GET_NEXT_EVENT:
1658 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
1659 if (access_ok(arg, sizeof(EVK_Next_Event_Query))) { /* pgr0039 */
1661 if (access_ok(VERIFY_WRITE, arg, sizeof(EVK_Next_Event_Query))) { /* pgr0039 */
1663 ret = evk_get_next_event((EVK_Next_Event_Query *)arg, peek_only);
1669 case EVK_IOC_DEBUG_LIST:
/*
 * poll/select handler.  The flag/queue ID this fd watches was stashed in
 * filp->private_data by EVK_IOC_SET_POLL.  Registers the queue's wait
 * queue head with poll_wait() and reports readiness when the flag value
 * is non-zero or the message queue is non-empty.
 */
1685 evk_poll(struct file *filp, poll_table *wait)
1687 unsigned int ret = 0;
1688 struct common_queue *queue;
1690 EV_ID queueID = (EV_ID)(filp->private_data);
1691 // Return an error instead of asserting on an invalid queueID
1692 // (avoids flooding the log / halting on a recoverable condition).
1693 if (!EV_ID_IS_VALID(queueID)) {
1694 printk("evk_poll ERROR: invalid queueID=%x\n", queueID);
1695 return POLLERR|POLLHUP;
1697 //EVK_assert(EV_ID_IS_VALID(queueID), "poll: flag/queueID not set");
1699 EVK_mutex_lock();/*****************************************/
1701 queue = find_queue_entry(queueID);
1702 if (queue == NULL) {
1703 EVK_info1("poll: No such flag/queueID %x\n", queueID);
1704 ret = POLLERR|POLLHUP;
1707 poll_wait(filp, &(queue->wq_head), wait);
/* Readiness check: pending flag bits or queued messages. */
1710 if (EV_ID_IS_FLAG(queueID)) {
1711 if (((struct flag *)queue)->value != 0) {
1714 } else if (EV_ID_IS_FLAG64(queueID)) {
1715 if (((struct flag64 *)queue)->value != 0) {
1719 if (((struct message_queue *)queue)->num > 0) {
1725 EVK_mutex_unlock(); /***************************************/
1730 /** File operations for the evk character device (ioctl-driven API).
 *  Other members (.owner, .open, .poll, ...) are set in lines elided
 *  from this extract — confirm against the full source. */
1731 static struct file_operations evk_fops = {
1733 .release = evk_close,
1734 .unlocked_ioctl = evk_ioctl,
1738 #ifdef ENABLE_PROC_FS
/*
 * Shared seq_file show routine for the /proc/driver/ev_* entries.
 * `mode` selects which object class to dump (evk_enum_flag / flag64 /
 * queue); prints a per-class header, then walks every hash bucket of the
 * queue-entry table and dumps each matching entry's PID, module ID,
 * ID/hash, and class-specific payload (flag value or queue stats).
 */
1740 evk_procFS_show(struct seq_file *m, int mode)
1743 struct list_head *list, *entries;
1744 struct common_queue *queue;
1746 seq_printf(m, "[ev library status ");
1750 seq_printf(m, "(flag)]\n");
1751 seq_printf(m, "PID moduleID flagID[hash] value\n");
1753 case evk_enum_flag64:
1754 seq_printf(m, "(flag64)]\n");
1755 seq_printf(m, "PID moduleID flagID[hash] value\n");
1757 case evk_enum_queue:
1758 seq_printf(m, "(queue)]\n");
1759 seq_printf(m, "PID moduleID queueID[hash] maxbytes remain type\n");
/* Walk all hash buckets; skip entries whose class does not match mode. */
1765 num = evk_get_queue_entry(&entries);
1767 for (i = 0 ; i < num ; i++) {
1768 list = &(entries[i]);
1769 if (!list_empty(list)) {
1770 list_for_each_entry(queue, list, list) {
1771 if ((mode == evk_enum_flag && (!EV_ID_IS_FLAG(queue->queueID)))
1772 || (mode == evk_enum_flag64 && (!EV_ID_IS_FLAG64(queue->queueID)))
1773 || (mode == evk_enum_queue && (!EV_ID_IS_QUEUE(queue->queueID))))
1778 seq_printf(m, "%08d ", queue->pid);
/* Module ID is packed into bits 8..23 of the queue ID. */
1779 seq_printf(m, "%05d(%04x) ", ((queue->queueID & 0x00ffff00) >> 8), ((queue->queueID & 0x00ffff00) >> 8));
1780 seq_printf(m, "0x%08x[%2d] ", queue->queueID, calc_hash(queue->queueID));
1784 seq_printf(m, "0x%x", EVK_PFLAG(queue)->value);
1787 case evk_enum_flag64:
1788 seq_printf(m, "0x%llx", EVK_PFLAG64(queue)->value);
1791 case evk_enum_queue:
1792 seq_printf(m, "%04d %02d ", EVK_PQUEUE(queue)->max_bytes, EVK_PQUEUE(queue)->length);
1793 seq_printf(m, "%02d ", EVK_PQUEUE(queue)->num);
1794 seq_printf(m, "%d ", EVK_PQUEUE(queue)->type);
1797 seq_printf(m, "\n");
/* Per-class seq_file show wrappers: bind evk_procFS_show to one mode. */
1807 evk_procFS_flag_show(struct seq_file *m, void *v)
1809 return evk_procFS_show(m, evk_enum_flag);
1813 evk_procFS_flag64_show(struct seq_file *m, void *v)
1815 return evk_procFS_show(m, evk_enum_flag64);
1819 evk_procFS_queue_show(struct seq_file *m, void *v)
1821 return evk_procFS_show(m, evk_enum_queue);
/* open handlers: single_open() pairs each /proc file with its show fn. */
1825 evk_procFS_flag_open(struct inode *inode, struct file *file)
1827 return single_open(file, evk_procFS_flag_show, NULL);
1831 evk_procFS_flag64_open(struct inode *inode, struct file *file)
1833 return single_open(file, evk_procFS_flag64_show, NULL);
1837 evk_procFS_queue_open(struct inode *inode, struct file *file)
1839 return single_open(file, evk_procFS_queue_show, NULL);
/* file_operations for the three /proc/driver/ev_* entries; standard
 * single_open/seq_file pattern (.read presumably seq_read — the member
 * is in a line elided from this extract). */
1842 static struct file_operations evk_proc_flag_fops = {
1843 .owner = THIS_MODULE,
1844 .open = evk_procFS_flag_open,
1846 .llseek = seq_lseek,
1847 .release = single_release,
1850 static struct file_operations evk_proc_flag64_fops = {
1851 .owner = THIS_MODULE,
1852 .open = evk_procFS_flag64_open,
1854 .llseek = seq_lseek,
1855 .release = single_release,
1858 static struct file_operations evk_proc_queue_fops = {
1859 .owner = THIS_MODULE,
1860 .open = evk_procFS_queue_open,
1862 .llseek = seq_lseek,
1863 .release = single_release,
1865 #endif /*ENABLE_PROC_FS*/
/* EVK_init body (signature elided from this extract):
 * registers the char device region + cdev, seeds the flag/flag64/queue
 * free pools and hash buckets, creates the /proc entries, and creates the
 * device node /dev/agl/evk via class_create/device_create. */
1871 #ifdef CONFIG_PROC_FS
1872 #ifdef ENABLE_PROC_FS
1873 struct proc_dir_entry *ret;
1874 #endif /* ENABLE_PROC_FS */
1875 #endif /* CONFIG_PROC_FS */
1879 dev = MKDEV(devmajor, devminor);
1880 err = register_chrdev_region(dev, nrdevs, EVK_NAME);
1882 EVK_info1("register_chrdev_region error %d\n", -err);
1886 cdev_init(&cdev, &evk_fops);
1887 cdev.owner = THIS_MODULE;
1888 cdev.ops = &evk_fops;
1890 err = cdev_add(&cdev, dev, 1);
1892 EVK_info1("cdev_add error %d\n", -err);
/* Seed the static object pools onto their free lists. */
1896 /* Initialization */
1897 for(i = 0 ; i < EVK_MAX_FLAGS ; i++) {
1898 list_add_tail((struct list_head *)&(_flag_pool[i]),
1901 for(i = 0 ; i < EVK_MAX_FLAG64S ; i++) {
1902 list_add_tail((struct list_head *)&(_flag64_pool[i]),
1905 for(i = 0 ; i < EVK_MAX_MESSAGE_QUEUES ; i++) {
1906 list_add_tail((struct list_head *)&(_message_queue_pool[i]),
1907 &message_queue_pool);
1909 for(i = 0 ; i < HASH_KEY ; i++) {
1910 INIT_LIST_HEAD(&(queue_entry[i]));
1913 #ifdef CONFIG_PROC_FS
1914 #ifdef ENABLE_PROC_FS
/* NOTE(review): on proc_create failure the messages log -err, but err is
 * last set by cdev_add above — likely stale; verify in full source. */
1915 ret = proc_create("driver/ev_flag", 0, NULL, &evk_proc_flag_fops);
1917 EVK_info1("Unable to initialize /proc entry %d\n", -err);
1921 ret = proc_create("driver/ev_flag64", 0, NULL, &evk_proc_flag64_fops);
1923 EVK_info1("Unable to initialize /proc entry %d\n", -err);
1927 ret = proc_create("driver/ev_queue", 0, NULL, &evk_proc_queue_fops);
1929 EVK_info1("Unable to initialize /proc entry %d\n", -err);
1932 #endif /* ENABLE_PROC_FS */
1933 #endif /* CONFIG_PROC_FS */
/* class_create(THIS_MODULE, ...) form: dropped the owner argument in
 * kernel 6.4 — needs a version guard if built against newer kernels. */
1935 pClass = class_create(THIS_MODULE, EVK_NAME);
1936 device_create(pClass, NULL, dev, NULL, "agl/"EVK_NAME);
1942 //#ifndef CONFIG_COMBINE_MODULES
/* EVK_exit body (signature elided from this extract): tears down the
 * device node, class, char device region, and /proc entries created by
 * EVK_init, in reverse order of creation. */
1947 dev_t dev = MKDEV(devmajor, devminor);
1948 device_destroy(pClass, dev);
1949 class_destroy(pClass);
1953 unregister_chrdev_region(dev, nrdevs);
/* Parent argument is 0 (i.e. NULL == /proc root); NULL would be clearer. */
1955 remove_proc_entry( "driver/ev_flag", 0 );
1956 remove_proc_entry( "driver/ev_flag64", 0 );
1957 remove_proc_entry( "driver/ev_queue", 0 );
/* Map evk_create_flag()'s errno-style result to an EV_ERR code:
 * -EEXIST -> EV_ERR_Exist, other negatives -> EV_ERR_Fatal
 * (success return is in a line elided from this extract). */
1964 EVK_create_flag_in(EV_ID flagID)
1966 int ret = evk_create_flag(flagID);
1968 if (ret == -EEXIST) {
1969 return EV_ERR_Exist;
1970 } else if (ret < 0) {
1971 return EV_ERR_Fatal;
/* Same mapping for the 64-bit flag variant. */
1978 EVK_create_flag64_in(EV_ID flagID)
1980 int ret = evk_create_flag64(flagID);
1982 if (ret == -EEXIST) {
1983 return EV_ERR_Exist;
1984 } else if (ret < 0) {
1985 return EV_ERR_Fatal;
1991 /** @see EV_create_flag */
/* Public creator: rejects IDs that are not flag IDs or that belong to the
 * auto-allocated range, then delegates to the _in helper. */
1993 EVK_create_flag(EV_ID flagID)
1995 if (!EV_ID_IS_FLAG(flagID) || EV_ID_IS_AUTO_ID(flagID)) {
1996 return EV_ERR_Invalid_ID;
1998 return EVK_create_flag_in(flagID);
2001 /** @see EV_create_flag64 */
2003 EVK_create_flag64(EV_ID flagID)
2005 if (!EV_ID_IS_FLAG64(flagID) || EV_ID_IS_AUTO_ID(flagID)) {
2006 return EV_ERR_Invalid_ID;
2008 return EVK_create_flag64_in(flagID);
/* Queue creation: same errno -> EV_ERR mapping as the flag helpers.
 * @param length    queue depth (number of messages)
 * @param max_bytes maximum message payload size
 * @param type      EV_Message_Queue_Type discipline */
2012 EVK_create_queue_in(EV_ID queueID, UINT8 length, UINT16 max_bytes,
2013 EV_Message_Queue_Type type)
2015 int ret = evk_create_message_queue(queueID, length, max_bytes, type);
2017 if (ret == -EEXIST) {
2018 return EV_ERR_Exist;
2019 } else if (ret < 0) {
2020 return EV_ERR_Fatal;
2026 /** @see EV_create_queue */
2028 EVK_create_queue(EV_ID queueID, UINT8 length, UINT16 max_bytes,
2029 EV_Message_Queue_Type type)
2031 if (!EV_ID_IS_QUEUE(queueID) || EV_ID_IS_AUTO_ID(queueID)) {
2032 return EV_ERR_Invalid_ID;
2034 return EVK_create_queue_in(queueID, length, max_bytes, type);
2037 /** @see EV_create_flag_auto_id */
/* Allocate a fresh flag ID, then create the flag; *flagID receives the
 * allocated ID on success. */
2039 EVK_create_flag_auto_id(/* OUT */EV_ID *flagID)
2042 EVK_assert(flagID != NULL, "NULL pointer was specified");
2044 if (evk_alloc_flagID(flagID) < 0) {
2045 return EV_ERR_Fatal;
2048 err = EVK_create_flag_in(*flagID);
2055 /** @see EV_create_flag64_auto_id */
2057 EVK_create_flag64_auto_id(/* OUT */EV_ID *flagID)
2060 EVK_assert(flagID != NULL, "NULL pointer was specified");
2062 if (evk_alloc_flag64ID(flagID) < 0) {
2063 return EV_ERR_Fatal;
2066 err = EVK_create_flag64_in(*flagID);
2073 /** @see EV_create_queue_auto_id */
2075 EVK_create_queue_auto_id(/* OUT */EV_ID *queueID, UINT8 length,
2076 UINT16 max_bytes, EV_Message_Queue_Type type)
2079 EVK_assert(queueID != NULL, "NULL pointer was specified");
2081 if (evk_alloc_queueID(queueID) < 0) {
2082 return EV_ERR_Fatal;
2085 err = EVK_create_queue_in(*queueID, length, max_bytes, type);
/* On creation failure the output ID is reset so callers never see a
 * half-allocated ID. */
2087 *queueID = EV_NO_ID;
2092 /** @see EV_destroy_flag */
/* Destroy a flag/queue entry; -ENOENT -> EV_ERR_Invalid_ID,
 * other negatives -> EV_ERR_Fatal. */
2094 EVK_destroy_flag(EV_ID flagID)
2097 err = evk_destroy_queue(flagID);
2099 if (err == -ENOENT) {
2100 return EV_ERR_Invalid_ID;
2101 } else if (err < 0) {
2102 return EV_ERR_Fatal;
2108 /** @see EV_destroy_queue */
/* Queues and flags share one destroy path. */
2110 EVK_destroy_queue(EV_ID queueID)
2112 return EVK_destroy_flag(queueID);
2115 /* Sending the event */
2116 /** @see EV_set_flag */
/* OR `bits` into the 32-bit flag identified by flagID via a kernel-space
 * EV_Flag record; maps -ENOENT -> EV_ERR_Invalid_ID. */
2118 EVK_set_flag(EV_ID flagID, UINT32 bits)
2123 if (!EV_ID_IS_FLAG(flagID)) {
2124 return EV_ERR_Invalid_ID;
2126 flag.flagID = flagID;
2129 ret = evk_store_flag(&flag, evk_mem_kernel);
2130 if (ret == -ENOENT) {
2131 return EV_ERR_Invalid_ID;
2132 } else if (ret < 0) {
2133 return EV_ERR_Fatal;
2139 /** @see EV_set_flag64 */
/* 64-bit variant of EVK_set_flag. */
2141 EVK_set_flag64(EV_ID flagID, UINT64 bits)
2146 if (!EV_ID_IS_FLAG64(flagID)) {
2147 return EV_ERR_Invalid_ID;
2149 flag.flagID = flagID;
2152 ret = evk_store_flag64(&flag, evk_mem_kernel);
2153 if (ret == -ENOENT) {
2154 return EV_ERR_Invalid_ID;
2155 } else if (ret < 0) {
2156 return EV_ERR_Fatal;
2162 /** @see EV_send_message */
/* Copy a caller-supplied payload into a temporary EV_Message and enqueue
 * it on queueID.  Error mapping: -ENOENT -> Invalid_ID, -EBUSY -> Busy,
 * other negatives -> Fatal.  msg is heap-allocated via evk_malloc; the
 * matching free is in lines elided from this extract — confirm no leak. */
2164 EVK_send_message(EV_ID queueID, UINT16 bytes, const void *message,
2167 EV_Message *msg = NULL;
2171 msg = evk_malloc( sizeof( EV_Message ) );
2174 ev_ret = EV_ERR_Fatal;
2178 if (!EV_ID_IS_QUEUE(queueID)) {
2179 ev_ret = EV_ERR_Invalid_ID;
/* bytes is asserted <= EV_MAX_MESSAGE_LENGTH before the memcpy below. */
2182 EVK_assert(message != NULL, "NULL pointer was specified");
2183 EVK_assert(bytes <= EV_MAX_MESSAGE_LENGTH, "send_message: message too long");
2185 msg->queueID = queueID;
2186 msg->senderInfo = senderInfo;
2187 msg->length = bytes;
2188 memcpy(msg->message, message, bytes);
2190 ret = evk_store_message(msg, evk_mem_kernel);
2191 if (ret == -ENOENT) {
2192 ev_ret = EV_ERR_Invalid_ID;
2193 } else if (ret == -EBUSY) {
2194 ev_ret = EV_ERR_Busy;
2195 } else if (ret < 0) {
2196 ev_ret = EV_ERR_Fatal;
2209 /* Event acquisition(Order of arrival time) */
2210 //EV_ERR EV_get_next_event(/* OUT */EV_Event *ev);
/* Common flag-get path for get/wait/peek: builds a kernel EV_Event query
 * for flagID, calls evk_get_event, and copies the result into *flag.
 * flag->flagID is pre-set to EV_NO_ID so "no event" is distinguishable.
 * Error mapping: -ENOENT -> Invalid_ID, -EINTR -> Interrupted,
 * other negatives -> Fatal. */
2213 EVK_get_flag_in(EV_ID flagID, EV_Flag *flag, int peek_only, int wait)
2215 EV_Event *ev = NULL;
2219 ev = evk_malloc( sizeof( EV_Event ) );
2222 ev_ret = EV_ERR_Fatal;
2226 EVK_assert(flag != NULL, "get_flag: NULL pointer was specified");
2227 flag->flagID = EV_NO_ID;
2230 if (!EV_ID_IS_FLAG(flagID)) {
2231 ev_ret = EV_ERR_Invalid_ID;
2235 ev->type = EV_EVENT_Flag;
2236 ev->u.flag.flagID = flagID;
2237 ev->u.flag.bits = 0;
2239 ret = evk_get_event(ev, peek_only, wait, evk_mem_kernel);
2242 if (ret == -ENOENT) {
2243 ev_ret = EV_ERR_Invalid_ID;
2244 } else if (ret == -EINTR) {
2245 ev_ret = EV_ERR_Interrupted;
2247 ev_ret = EV_ERR_Fatal;
/* Only copy out if an actual flag event was returned. */
2252 if (ev->type == EV_EVENT_Flag) {
2253 flag->flagID = ev->u.flag.flagID;
2254 flag->bits = ev->u.flag.bits;
/* 64-bit counterpart of EVK_get_flag_in: identical flow and error
 * mapping, operating on EV_Flag64 / EV_EVENT_Flag64. */
2267 EVK_get_flag64_in(EV_ID flagID, EV_Flag64 *flag, int peek_only, int wait)
2269 EV_Event *ev = NULL;
2273 ev = evk_malloc( sizeof( EV_Event ) );
2276 ev_ret = EV_ERR_Fatal;
2280 EVK_assert(flag != NULL, "get_flag64: NULL pointer was specified");
2281 flag->flagID = EV_NO_ID;
2284 if (!EV_ID_IS_FLAG64(flagID)) {
2285 ev_ret = EV_ERR_Invalid_ID;
2289 ev->type = EV_EVENT_Flag64;
2290 ev->u.flag64.flagID = flagID;
2291 ev->u.flag64.bits = 0;
2293 ret = evk_get_event(ev, peek_only, wait, evk_mem_kernel);
2296 if (ret == -ENOENT) {
2297 ev_ret = EV_ERR_Invalid_ID;
2298 } else if (ret == -EINTR) {
2299 ev_ret = EV_ERR_Interrupted;
2301 ev_ret = EV_ERR_Fatal;
/* Only copy out if an actual flag64 event was returned. */
2306 if (ev->type == EV_EVENT_Flag64) {
2307 flag->flagID = ev->u.flag64.flagID;
2308 flag->bits = ev->u.flag64.bits;
2320 /* Event acquisition(With Search Criteria) */
2321 /** @see EV_get_flag */
/* Thin wrappers over EVK_get_flag{,64}_in: (peek_only, wait) =
 * get (0,0) consume without blocking; wait (0,1) block until set;
 * peek (1,0) inspect without consuming. */
2323 EVK_get_flag(EV_ID flagID, /* OUT */EV_Flag *flag)
2325 return EVK_get_flag_in(flagID, flag, 0, 0);
2328 /** @see EV_get_flag64 */
2330 EVK_get_flag64(EV_ID flagID, /* OUT */EV_Flag64 *flag)
2332 return EVK_get_flag64_in(flagID, flag, 0, 0);
2335 /** @see EV_wait_flag */
2337 EVK_wait_flag(EV_ID flagID, /* OUT */EV_Flag *flag)/* block */
2339 return EVK_get_flag_in(flagID, flag, 0, 1);
2342 /** @see EV_wait_flag64 */
2344 EVK_wait_flag64(EV_ID flagID, /* OUT */EV_Flag64 *flag)/* block */
2346 return EVK_get_flag64_in(flagID, flag, 0, 1);
2349 /** @see EV_peek_flag */
2351 EVK_peek_flag(EV_ID flagID, /* OUT */EV_Flag *flag)
2353 return EVK_get_flag_in(flagID, flag, 1, 0);
2356 /** @see EV_peek_flag64 */
2358 EVK_peek_flag64(EV_ID flagID, /* OUT */EV_Flag64 *flag)
2360 return EVK_get_flag64_in(flagID, flag, 1, 0);
/* Common message-get path for get/wait/peek: builds a kernel EV_Event
 * query for queueID and copies any returned message into *message.
 * compare_bytes/senderInfo act as optional search criteria forwarded in
 * the query.  Error mapping matches EVK_get_flag_in.
 * NOTE(review): `length` is memcpy'd into ev->u.message.message without a
 * visible bound check against EV_MAX_MESSAGE_LENGTH here — callers in
 * this file pass 0/NULL, but confirm external callers are bounded. */
2364 EVK_get_message_in(EV_ID queueID, EV_Message *message, UINT32 senderInfo,
2365 UINT32 length, const void *compare_bytes,
2366 int peek_only, int wait)
2368 EV_Event *ev = NULL;
2372 ev = evk_malloc( sizeof( EV_Event ) );
2375 ev_ret = EV_ERR_Fatal;
2379 EVK_assert(message != NULL, "get_message: NULL pointer was specified");
2380 if (!EV_ID_IS_QUEUE(queueID)) {
2381 ev_ret = EV_ERR_Invalid_ID;
/* Pre-set "no message" so failure paths leave a well-defined result. */
2384 message->queueID = EV_NO_ID;
2385 message->senderInfo = 0;
2386 message->length = 0;
2388 ev->type = EV_EVENT_Message;
2389 ev->u.message.queueID = queueID;
2390 ev->u.message.senderInfo = senderInfo;
2391 if (compare_bytes != NULL) {
2392 ev->u.message.length = length;
2393 memcpy(ev->u.message.message, compare_bytes, length);
2395 ev->u.message.length = 0;
2398 ret = evk_get_event(ev, peek_only, wait, evk_mem_kernel);
2401 if (ret == -ENOENT) {
2402 ev_ret = EV_ERR_Invalid_ID;
2403 } else if (ret == -EINTR) {
2404 ev_ret = EV_ERR_Interrupted;
2406 ev_ret = EV_ERR_Fatal;
/* Only copy out if an actual message event was returned. */
2411 if (ev->type == EV_EVENT_Message) {
2412 message->queueID = ev->u.message.queueID;
2413 message->senderInfo = ev->u.message.senderInfo;
2414 message->length = ev->u.message.length;
2415 memcpy(message->message, ev->u.message.message, ev->u.message.length);
2427 /** @see EV_get_message */
/* Wrappers over EVK_get_message_in with no search criteria:
 * get (0,0), wait (0,1) blocking, peek (1,0) non-consuming. */
2429 EVK_get_message(EV_ID queueID, /* OUT */EV_Message *message)
2431 return EVK_get_message_in(queueID, message, 0, 0, NULL, 0, 0);
2434 /** @see EV_wait_message */
2436 EVK_wait_message(EV_ID queueID, /* OUT */EV_Message *message)/* block */
2438 return EVK_get_message_in(queueID, message, 0, 0, NULL, 0, 1);
2441 /** @see EV_peek_message */
2443 EVK_peek_message(EV_ID queueID, /* OUT */EV_Message *message)
2445 return EVK_get_message_in(queueID, message, 0, 0, NULL, 1, 0);
2448 /** @see EV_find_message_by_sender */
/* Not implemented in kernel space: always reports EV_ERR_Fatal. */
2450 EVK_find_message_by_sender(EV_ID queueID, UINT32 senderInfo,
2451 /* OUT */EV_Message *message)
2454 return EV_ERR_Fatal;
2457 /** @see EV_find_message_by_content */
/* Not implemented in kernel space: always reports EV_ERR_Fatal. */
2459 EVK_find_message_by_content(EV_ID queueID, UINT16 length,
2460 const void *compare_bytes,
2461 /* OUT */EV_Message *message)
2464 return EV_ERR_Fatal;
/* Kernel-space API exported for other in-tree modules. */
2468 EXPORT_SYMBOL(evk_get_queue_entry);
2469 EXPORT_SYMBOL(EVK_init);
2470 EXPORT_SYMBOL(EVK_exit);
2472 EXPORT_SYMBOL(EVK_create_flag);
2473 EXPORT_SYMBOL(EVK_create_flag64);
2474 EXPORT_SYMBOL(EVK_create_queue);
2475 EXPORT_SYMBOL(EVK_create_flag_auto_id);
2476 EXPORT_SYMBOL(EVK_create_flag64_auto_id);
2477 EXPORT_SYMBOL(EVK_create_queue_auto_id);
2478 EXPORT_SYMBOL(EVK_destroy_flag);
2479 EXPORT_SYMBOL(EVK_destroy_queue);
2480 EXPORT_SYMBOL(EVK_set_flag);
2481 EXPORT_SYMBOL(EVK_set_flag64);
2482 EXPORT_SYMBOL(EVK_send_message);
2483 EXPORT_SYMBOL(EVK_get_flag);
2484 EXPORT_SYMBOL(EVK_wait_flag);
2485 EXPORT_SYMBOL(EVK_peek_flag);
2486 EXPORT_SYMBOL(EVK_get_flag64);
2487 EXPORT_SYMBOL(EVK_wait_flag64);
2488 EXPORT_SYMBOL(EVK_peek_flag64);
2489 EXPORT_SYMBOL(EVK_get_message);
2490 EXPORT_SYMBOL(EVK_wait_message);
2491 EXPORT_SYMBOL(EVK_peek_message);
/* Standalone-module registration; skipped when evk is linked into a
 * combined module that supplies its own init/exit. */
2493 #ifndef CONFIG_COMBINE_MODULES
2494 //MODULE_LICENSE("proprietary");
2495 MODULE_LICENSE("GPL");
2496 MODULE_DESCRIPTION("EVent library for Kernel");
2497 //MODULE_SUPPORTED_DEVICE(name);
2498 //MODULE_PARM(var,type)
2499 //MODULE_PARM_DESC(var,desc)
2500 module_init(EVK_init);
2501 module_exit(EVK_exit);
2502 #endif /* !CONFIG_COMBINE_MODULES */