2 * core.c - Implementation of core module of MOST Linux driver stack
4 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
11 * This file is licensed under GPLv2.
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/device.h>
20 #include <linux/list.h>
21 #include <linux/poll.h>
22 #include <linux/wait.h>
23 #include <linux/kobject.h>
24 #include <linux/mutex.h>
25 #include <linux/completion.h>
26 #include <linux/sysfs.h>
27 #include <linux/kthread.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/idr.h>
32 #define MAX_CHANNELS 64
33 #define STRING_SIZE 80
35 static struct class *most_class;
36 static struct device *class_glue_dir;
37 static struct ida mdev_id;
38 static int dummy_num_buffers;
40 struct most_c_aim_obj {
48 struct completion cleanup;
50 atomic_t mbo_nq_level;
53 struct mutex start_mutex;
55 struct most_interface *iface;
56 struct most_inst_obj *inst;
57 struct most_channel_config cfg;
60 struct list_head fifo;
62 struct list_head halt_fifo;
63 struct list_head list;
64 struct most_c_aim_obj aim0;
65 struct most_c_aim_obj aim1;
66 struct list_head trash_fifo;
67 struct task_struct *hdm_enqueue_task;
68 wait_queue_head_t hdm_fifo_wq;
71 #define to_c_obj(d) container_of(d, struct most_c_obj, kobj)
73 struct most_inst_obj {
75 struct most_interface *iface;
76 struct list_head channel_list;
77 struct most_c_obj *channel[MAX_CHANNELS];
79 struct list_head list;
82 #define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj)
85 * list_pop_mbo - retrieves the first MBO of the list and removes it
86 * @ptr: the list head to grab the MBO from.
88 #define list_pop_mbo(ptr) \
90 struct mbo *_mbo = list_first_entry(ptr, struct mbo, list); \
91 list_del(&_mbo->list); \
100 * struct most_c_attr - to access the attributes of a channel object
101 * @attr: attributes of a channel
102 * @show: pointer to the show function
103 * @store: pointer to the store function
106 struct attribute attr;
107 ssize_t (*show)(struct most_c_obj *d,
108 struct most_c_attr *attr,
110 ssize_t (*store)(struct most_c_obj *d,
111 struct most_c_attr *attr,
116 #define to_channel_attr(a) container_of(a, struct most_c_attr, attr)
118 #define MOST_CHNL_ATTR(_name, _mode, _show, _store) \
119 struct most_c_attr most_chnl_attr_##_name = \
120 __ATTR(_name, _mode, _show, _store)
123 * channel_attr_show - show function of channel object
124 * @kobj: pointer to its kobject
125 * @attr: pointer to its attributes
128 static ssize_t channel_attr_show(struct kobject *kobj, struct attribute *attr,
131 struct most_c_attr *channel_attr = to_channel_attr(attr);
132 struct most_c_obj *c_obj = to_c_obj(kobj);
134 if (!channel_attr->show)
137 return channel_attr->show(c_obj, channel_attr, buf);
141 * channel_attr_store - store function of channel object
142 * @kobj: pointer to its kobject
143 * @attr: pointer to its attributes
145 * @len: length of buffer
147 static ssize_t channel_attr_store(struct kobject *kobj,
148 struct attribute *attr,
152 struct most_c_attr *channel_attr = to_channel_attr(attr);
153 struct most_c_obj *c_obj = to_c_obj(kobj);
155 if (!channel_attr->store)
157 return channel_attr->store(c_obj, channel_attr, buf, len);
160 static const struct sysfs_ops most_channel_sysfs_ops = {
161 .show = channel_attr_show,
162 .store = channel_attr_store,
166 * most_free_mbo_coherent - free an MBO and its coherent buffer
167 * @mbo: buffer to be released
170 static void most_free_mbo_coherent(struct mbo *mbo)
172 struct most_c_obj *c = mbo->context;
173 u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
175 dma_free_coherent(NULL, coherent_buf_size, mbo->virt_address,
178 if (atomic_sub_and_test(1, &c->mbo_ref))
179 complete(&c->cleanup);
183 * flush_channel_fifos - clear the channel fifos
184 * @c: pointer to channel object
186 static void flush_channel_fifos(struct most_c_obj *c)
188 unsigned long flags, hf_flags;
189 struct mbo *mbo, *tmp;
191 if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
194 spin_lock_irqsave(&c->fifo_lock, flags);
195 list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
196 list_del(&mbo->list);
197 spin_unlock_irqrestore(&c->fifo_lock, flags);
198 most_free_mbo_coherent(mbo);
199 spin_lock_irqsave(&c->fifo_lock, flags);
201 spin_unlock_irqrestore(&c->fifo_lock, flags);
203 spin_lock_irqsave(&c->fifo_lock, hf_flags);
204 list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
205 list_del(&mbo->list);
206 spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
207 most_free_mbo_coherent(mbo);
208 spin_lock_irqsave(&c->fifo_lock, hf_flags);
210 spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
212 if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
213 pr_info("WARN: fifo | trash fifo not empty\n");
217 * flush_trash_fifo - clear the trash fifo
218 * @c: pointer to channel object
220 static int flush_trash_fifo(struct most_c_obj *c)
222 struct mbo *mbo, *tmp;
225 spin_lock_irqsave(&c->fifo_lock, flags);
226 list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
227 list_del(&mbo->list);
228 spin_unlock_irqrestore(&c->fifo_lock, flags);
229 most_free_mbo_coherent(mbo);
230 spin_lock_irqsave(&c->fifo_lock, flags);
232 spin_unlock_irqrestore(&c->fifo_lock, flags);
237 * most_channel_release - release function of channel object
238 * @kobj: pointer to channel's kobject
/**
 * most_channel_release - release function of channel object
 * @kobj: pointer to channel's kobject
 *
 * Called by the kobject core when the channel's reference count drops
 * to zero; frees the embedding most_c_obj allocation.
 */
static void most_channel_release(struct kobject *kobj)
{
	struct most_c_obj *c = to_c_obj(kobj);

	kfree(c);
}
247 static ssize_t show_available_directions(struct most_c_obj *c,
248 struct most_c_attr *attr,
251 unsigned int i = c->channel_id;
254 if (c->iface->channel_vector[i].direction & MOST_CH_RX)
255 strcat(buf, "dir_rx ");
256 if (c->iface->channel_vector[i].direction & MOST_CH_TX)
257 strcat(buf, "dir_tx ");
259 return strlen(buf) + 1;
262 static ssize_t show_available_datatypes(struct most_c_obj *c,
263 struct most_c_attr *attr,
266 unsigned int i = c->channel_id;
269 if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
270 strcat(buf, "control ");
271 if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
272 strcat(buf, "async ");
273 if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
274 strcat(buf, "sync ");
275 if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC_AVP)
276 strcat(buf, "isoc_avp ");
278 return strlen(buf) + 1;
282 ssize_t show_number_of_packet_buffers(struct most_c_obj *c,
283 struct most_c_attr *attr,
286 unsigned int i = c->channel_id;
288 return snprintf(buf, PAGE_SIZE, "%d\n",
289 c->iface->channel_vector[i].num_buffers_packet);
293 ssize_t show_number_of_stream_buffers(struct most_c_obj *c,
294 struct most_c_attr *attr,
297 unsigned int i = c->channel_id;
299 return snprintf(buf, PAGE_SIZE, "%d\n",
300 c->iface->channel_vector[i].num_buffers_streaming);
304 ssize_t show_size_of_packet_buffer(struct most_c_obj *c,
305 struct most_c_attr *attr,
308 unsigned int i = c->channel_id;
310 return snprintf(buf, PAGE_SIZE, "%d\n",
311 c->iface->channel_vector[i].buffer_size_packet);
315 ssize_t show_size_of_stream_buffer(struct most_c_obj *c,
316 struct most_c_attr *attr,
319 unsigned int i = c->channel_id;
321 return snprintf(buf, PAGE_SIZE, "%d\n",
322 c->iface->channel_vector[i].buffer_size_streaming);
325 static ssize_t show_channel_starving(struct most_c_obj *c,
326 struct most_c_attr *attr,
329 return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
332 #define create_show_channel_attribute(val) \
333 static MOST_CHNL_ATTR(val, S_IRUGO, show_##val, NULL)
335 create_show_channel_attribute(available_directions);
336 create_show_channel_attribute(available_datatypes);
337 create_show_channel_attribute(number_of_packet_buffers);
338 create_show_channel_attribute(number_of_stream_buffers);
339 create_show_channel_attribute(size_of_stream_buffer);
340 create_show_channel_attribute(size_of_packet_buffer);
341 create_show_channel_attribute(channel_starving);
343 static ssize_t show_set_number_of_buffers(struct most_c_obj *c,
344 struct most_c_attr *attr,
347 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
350 static ssize_t store_set_number_of_buffers(struct most_c_obj *c,
351 struct most_c_attr *attr,
355 int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);
362 static ssize_t show_set_buffer_size(struct most_c_obj *c,
363 struct most_c_attr *attr,
366 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
369 static ssize_t store_set_buffer_size(struct most_c_obj *c,
370 struct most_c_attr *attr,
374 int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);
381 static ssize_t show_set_direction(struct most_c_obj *c,
382 struct most_c_attr *attr,
385 if (c->cfg.direction & MOST_CH_TX)
386 return snprintf(buf, PAGE_SIZE, "dir_tx\n");
387 else if (c->cfg.direction & MOST_CH_RX)
388 return snprintf(buf, PAGE_SIZE, "dir_rx\n");
389 return snprintf(buf, PAGE_SIZE, "unconfigured\n");
392 static ssize_t store_set_direction(struct most_c_obj *c,
393 struct most_c_attr *attr,
397 if (!strcmp(buf, "dir_rx\n")) {
398 c->cfg.direction = MOST_CH_RX;
399 } else if (!strcmp(buf, "dir_tx\n")) {
400 c->cfg.direction = MOST_CH_TX;
402 pr_info("WARN: invalid attribute settings\n");
408 static ssize_t show_set_datatype(struct most_c_obj *c,
409 struct most_c_attr *attr,
412 if (c->cfg.data_type & MOST_CH_CONTROL)
413 return snprintf(buf, PAGE_SIZE, "control\n");
414 else if (c->cfg.data_type & MOST_CH_ASYNC)
415 return snprintf(buf, PAGE_SIZE, "async\n");
416 else if (c->cfg.data_type & MOST_CH_SYNC)
417 return snprintf(buf, PAGE_SIZE, "sync\n");
418 else if (c->cfg.data_type & MOST_CH_ISOC_AVP)
419 return snprintf(buf, PAGE_SIZE, "isoc_avp\n");
420 return snprintf(buf, PAGE_SIZE, "unconfigured\n");
423 static ssize_t store_set_datatype(struct most_c_obj *c,
424 struct most_c_attr *attr,
428 if (!strcmp(buf, "control\n")) {
429 c->cfg.data_type = MOST_CH_CONTROL;
430 } else if (!strcmp(buf, "async\n")) {
431 c->cfg.data_type = MOST_CH_ASYNC;
432 } else if (!strcmp(buf, "sync\n")) {
433 c->cfg.data_type = MOST_CH_SYNC;
434 } else if (!strcmp(buf, "isoc_avp\n")) {
435 c->cfg.data_type = MOST_CH_ISOC_AVP;
437 pr_info("WARN: invalid attribute settings\n");
443 static ssize_t show_set_subbuffer_size(struct most_c_obj *c,
444 struct most_c_attr *attr,
447 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
450 static ssize_t store_set_subbuffer_size(struct most_c_obj *c,
451 struct most_c_attr *attr,
455 int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);
462 static ssize_t show_set_packets_per_xact(struct most_c_obj *c,
463 struct most_c_attr *attr,
466 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
469 static ssize_t store_set_packets_per_xact(struct most_c_obj *c,
470 struct most_c_attr *attr,
474 int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);
481 #define create_channel_attribute(value) \
482 static MOST_CHNL_ATTR(value, S_IRUGO | S_IWUSR, \
486 create_channel_attribute(set_buffer_size);
487 create_channel_attribute(set_number_of_buffers);
488 create_channel_attribute(set_direction);
489 create_channel_attribute(set_datatype);
490 create_channel_attribute(set_subbuffer_size);
491 create_channel_attribute(set_packets_per_xact);
494 * most_channel_def_attrs - array of default attributes of channel object
496 static struct attribute *most_channel_def_attrs[] = {
497 &most_chnl_attr_available_directions.attr,
498 &most_chnl_attr_available_datatypes.attr,
499 &most_chnl_attr_number_of_packet_buffers.attr,
500 &most_chnl_attr_number_of_stream_buffers.attr,
501 &most_chnl_attr_size_of_packet_buffer.attr,
502 &most_chnl_attr_size_of_stream_buffer.attr,
503 &most_chnl_attr_set_number_of_buffers.attr,
504 &most_chnl_attr_set_buffer_size.attr,
505 &most_chnl_attr_set_direction.attr,
506 &most_chnl_attr_set_datatype.attr,
507 &most_chnl_attr_set_subbuffer_size.attr,
508 &most_chnl_attr_set_packets_per_xact.attr,
509 &most_chnl_attr_channel_starving.attr,
513 static struct kobj_type most_channel_ktype = {
514 .sysfs_ops = &most_channel_sysfs_ops,
515 .release = most_channel_release,
516 .default_attrs = most_channel_def_attrs,
519 static struct kset *most_channel_kset;
522 * create_most_c_obj - allocates a channel object
523 * @name: name of the channel object
524 * @parent: parent kobject
526 * This create a channel object and registers it with sysfs.
527 * Returns a pointer to the object or NULL when something went wrong.
529 static struct most_c_obj *
530 create_most_c_obj(const char *name, struct kobject *parent)
532 struct most_c_obj *c;
535 c = kzalloc(sizeof(*c), GFP_KERNEL);
538 c->kobj.kset = most_channel_kset;
539 retval = kobject_init_and_add(&c->kobj, &most_channel_ktype, parent,
542 kobject_put(&c->kobj);
545 kobject_uevent(&c->kobj, KOBJ_ADD);
550 * ___I N S T A N C E___
552 #define MOST_INST_ATTR(_name, _mode, _show, _store) \
553 struct most_inst_attribute most_inst_attr_##_name = \
554 __ATTR(_name, _mode, _show, _store)
556 static struct list_head instance_list;
559 * struct most_inst_attribute - to access the attributes of instance object
560 * @attr: attributes of an instance
561 * @show: pointer to the show function
562 * @store: pointer to the store function
564 struct most_inst_attribute {
565 struct attribute attr;
566 ssize_t (*show)(struct most_inst_obj *d,
567 struct most_inst_attribute *attr,
569 ssize_t (*store)(struct most_inst_obj *d,
570 struct most_inst_attribute *attr,
575 #define to_instance_attr(a) \
576 container_of(a, struct most_inst_attribute, attr)
579 * instance_attr_show - show function for an instance object
580 * @kobj: pointer to kobject
581 * @attr: pointer to attribute struct
584 static ssize_t instance_attr_show(struct kobject *kobj,
585 struct attribute *attr,
588 struct most_inst_attribute *instance_attr;
589 struct most_inst_obj *instance_obj;
591 instance_attr = to_instance_attr(attr);
592 instance_obj = to_inst_obj(kobj);
594 if (!instance_attr->show)
597 return instance_attr->show(instance_obj, instance_attr, buf);
601 * instance_attr_store - store function for an instance object
602 * @kobj: pointer to kobject
603 * @attr: pointer to attribute struct
605 * @len: length of buffer
607 static ssize_t instance_attr_store(struct kobject *kobj,
608 struct attribute *attr,
612 struct most_inst_attribute *instance_attr;
613 struct most_inst_obj *instance_obj;
615 instance_attr = to_instance_attr(attr);
616 instance_obj = to_inst_obj(kobj);
618 if (!instance_attr->store)
621 return instance_attr->store(instance_obj, instance_attr, buf, len);
624 static const struct sysfs_ops most_inst_sysfs_ops = {
625 .show = instance_attr_show,
626 .store = instance_attr_store,
630 * most_inst_release - release function for instance object
631 * @kobj: pointer to instance's kobject
633 * This frees the allocated memory for the instance object
635 static void most_inst_release(struct kobject *kobj)
637 struct most_inst_obj *inst = to_inst_obj(kobj);
642 static ssize_t show_description(struct most_inst_obj *instance_obj,
643 struct most_inst_attribute *attr,
646 return snprintf(buf, PAGE_SIZE, "%s\n",
647 instance_obj->iface->description);
650 static ssize_t show_interface(struct most_inst_obj *instance_obj,
651 struct most_inst_attribute *attr,
654 switch (instance_obj->iface->interface) {
656 return snprintf(buf, PAGE_SIZE, "loopback\n");
658 return snprintf(buf, PAGE_SIZE, "i2c\n");
660 return snprintf(buf, PAGE_SIZE, "i2s\n");
662 return snprintf(buf, PAGE_SIZE, "tsi\n");
664 return snprintf(buf, PAGE_SIZE, "hbi\n");
665 case ITYPE_MEDIALB_DIM:
666 return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
667 case ITYPE_MEDIALB_DIM2:
668 return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
670 return snprintf(buf, PAGE_SIZE, "usb\n");
672 return snprintf(buf, PAGE_SIZE, "pcie\n");
674 return snprintf(buf, PAGE_SIZE, "unknown\n");
677 #define create_inst_attribute(value) \
678 static MOST_INST_ATTR(value, S_IRUGO, show_##value, NULL)
680 create_inst_attribute(description);
681 create_inst_attribute(interface);
683 static struct attribute *most_inst_def_attrs[] = {
684 &most_inst_attr_description.attr,
685 &most_inst_attr_interface.attr,
689 static struct kobj_type most_inst_ktype = {
690 .sysfs_ops = &most_inst_sysfs_ops,
691 .release = most_inst_release,
692 .default_attrs = most_inst_def_attrs,
695 static struct kset *most_inst_kset;
698 * create_most_inst_obj - creates an instance object
699 * @name: name of the object to be created
701 * This allocates memory for an instance structure, assigns the proper kset
702 * and registers it with sysfs.
704 * Returns a pointer to the instance object or NULL when something went wrong.
706 static struct most_inst_obj *create_most_inst_obj(const char *name)
708 struct most_inst_obj *inst;
711 inst = kzalloc(sizeof(*inst), GFP_KERNEL);
714 inst->kobj.kset = most_inst_kset;
715 retval = kobject_init_and_add(&inst->kobj, &most_inst_ktype, NULL,
718 kobject_put(&inst->kobj);
721 kobject_uevent(&inst->kobj, KOBJ_ADD);
726 * destroy_most_inst_obj - MOST instance release function
727 * @inst: pointer to the instance object
729 * This decrements the reference counter of the instance object.
730 * If the reference count turns zero, its release function is called
732 static void destroy_most_inst_obj(struct most_inst_obj *inst)
734 struct most_c_obj *c, *tmp;
736 list_for_each_entry_safe(c, tmp, &inst->channel_list, list) {
738 flush_channel_fifos(c);
739 kobject_put(&c->kobj);
741 kobject_put(&inst->kobj);
747 struct most_aim_obj {
749 struct list_head list;
750 struct most_aim *driver;
751 char add_link[STRING_SIZE];
752 char remove_link[STRING_SIZE];
755 #define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj)
757 static struct list_head aim_list;
760 * struct most_aim_attribute - to access the attributes of AIM object
761 * @attr: attributes of an AIM
762 * @show: pointer to the show function
763 * @store: pointer to the store function
765 struct most_aim_attribute {
766 struct attribute attr;
767 ssize_t (*show)(struct most_aim_obj *d,
768 struct most_aim_attribute *attr,
770 ssize_t (*store)(struct most_aim_obj *d,
771 struct most_aim_attribute *attr,
776 #define to_aim_attr(a) container_of(a, struct most_aim_attribute, attr)
779 * aim_attr_show - show function of an AIM object
780 * @kobj: pointer to kobject
781 * @attr: pointer to attribute struct
784 static ssize_t aim_attr_show(struct kobject *kobj,
785 struct attribute *attr,
788 struct most_aim_attribute *aim_attr;
789 struct most_aim_obj *aim_obj;
791 aim_attr = to_aim_attr(attr);
792 aim_obj = to_aim_obj(kobj);
797 return aim_attr->show(aim_obj, aim_attr, buf);
801 * aim_attr_store - store function of an AIM object
802 * @kobj: pointer to kobject
803 * @attr: pointer to attribute struct
805 * @len: length of buffer
807 static ssize_t aim_attr_store(struct kobject *kobj,
808 struct attribute *attr,
812 struct most_aim_attribute *aim_attr;
813 struct most_aim_obj *aim_obj;
815 aim_attr = to_aim_attr(attr);
816 aim_obj = to_aim_obj(kobj);
818 if (!aim_attr->store)
820 return aim_attr->store(aim_obj, aim_attr, buf, len);
823 static const struct sysfs_ops most_aim_sysfs_ops = {
824 .show = aim_attr_show,
825 .store = aim_attr_store,
829 * most_aim_release - AIM release function
830 * @kobj: pointer to AIM's kobject
832 static void most_aim_release(struct kobject *kobj)
834 struct most_aim_obj *aim_obj = to_aim_obj(kobj);
839 static ssize_t show_add_link(struct most_aim_obj *aim_obj,
840 struct most_aim_attribute *attr,
843 return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->add_link);
847 * split_string - parses and changes string in the buffer buf and
848 * splits it into two mandatory and one optional substrings.
850 * @buf: complete string from attribute 'add_channel'
851 * @a: address of pointer to 1st substring (=instance name)
852 * @b: address of pointer to 2nd substring (=channel name)
853 * @c: optional address of pointer to 3rd substring (=user defined name)
857 * Input: "mdev0:ch0@ep_81:my_channel\n" or
858 * "mdev0:ch0@ep_81:my_channel"
860 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> "my_channel"
862 * Input: "mdev0:ch0@ep_81\n"
863 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> ""
865 * Input: "mdev0:ch0@ep_81"
866 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c == NULL
868 static int split_string(char *buf, char **a, char **b, char **c)
870 *a = strsep(&buf, ":");
874 *b = strsep(&buf, ":\n");
879 *c = strsep(&buf, ":\n");
885 * get_channel_by_name - get pointer to channel object
886 * @mdev: name of the device instance
887 * @mdev_ch: name of the respective channel
889 * This retrieves the pointer to a channel object.
892 most_c_obj *get_channel_by_name(char *mdev, char *mdev_ch)
894 struct most_c_obj *c, *tmp;
895 struct most_inst_obj *i, *i_tmp;
898 list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
899 if (!strcmp(kobject_name(&i->kobj), mdev)) {
904 if (unlikely(!found))
905 return ERR_PTR(-EIO);
907 list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
908 if (!strcmp(kobject_name(&c->kobj), mdev_ch)) {
913 if (unlikely(found < 2))
914 return ERR_PTR(-EIO);
919 * store_add_link - store() function for add_link attribute
920 * @aim_obj: pointer to AIM object
921 * @attr: its attributes
923 * @len: buffer length
925 * This parses the string given by buf and splits it into
926 * three substrings. Note: third substring is optional. In case a cdev
927 * AIM is loaded the optional 3rd substring will make up the name of
928 * device node in the /dev directory. If omitted, the device node will
929 * inherit the channel's name within sysfs.
931 * Searches for a pair of device and channel and probes the AIM
934 * (1) echo -n -e "mdev0:ch0@ep_81:my_rxchannel\n" >add_link
935 * (2) echo -n -e "mdev0:ch0@ep_81\n" >add_link
937 * (1) would create the device node /dev/my_rxchannel
938 * (2) would create the device node /dev/mdev0-ch0@ep_81
940 static ssize_t store_add_link(struct most_aim_obj *aim_obj,
941 struct most_aim_attribute *attr,
945 struct most_c_obj *c;
946 struct most_aim **aim_ptr;
947 char buffer[STRING_SIZE];
951 char devnod_buf[STRING_SIZE];
953 size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
955 strlcpy(buffer, buf, max_len);
956 strlcpy(aim_obj->add_link, buf, max_len);
958 ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod);
962 if (!mdev_devnod || *mdev_devnod == 0) {
963 snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev,
965 mdev_devnod = devnod_buf;
968 c = get_channel_by_name(mdev, mdev_ch);
973 aim_ptr = &c->aim0.ptr;
974 else if (!c->aim1.ptr)
975 aim_ptr = &c->aim1.ptr;
979 *aim_ptr = aim_obj->driver;
980 ret = aim_obj->driver->probe_channel(c->iface, c->channel_id,
981 &c->cfg, &c->kobj, mdev_devnod);
990 static struct most_aim_attribute most_aim_attr_add_link =
991 __ATTR(add_link, S_IRUGO | S_IWUSR, show_add_link, store_add_link);
993 static ssize_t show_remove_link(struct most_aim_obj *aim_obj,
994 struct most_aim_attribute *attr,
997 return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->remove_link);
1001 * store_remove_link - store function for remove_link attribute
1002 * @aim_obj: pointer to AIM object
1003 * @attr: its attributes
1005 * @len: buffer length
1008 * echo -n -e "mdev0:ch0@ep_81\n" >remove_link
1010 static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
1011 struct most_aim_attribute *attr,
1015 struct most_c_obj *c;
1016 char buffer[STRING_SIZE];
1020 size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
1022 strlcpy(buffer, buf, max_len);
1023 strlcpy(aim_obj->remove_link, buf, max_len);
1024 ret = split_string(buffer, &mdev, &mdev_ch, NULL);
1028 c = get_channel_by_name(mdev, mdev_ch);
1032 if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
1034 if (c->aim0.ptr == aim_obj->driver)
1036 if (c->aim1.ptr == aim_obj->driver)
1041 static struct most_aim_attribute most_aim_attr_remove_link =
1042 __ATTR(remove_link, S_IRUGO | S_IWUSR, show_remove_link,
1045 static struct attribute *most_aim_def_attrs[] = {
1046 &most_aim_attr_add_link.attr,
1047 &most_aim_attr_remove_link.attr,
1051 static struct kobj_type most_aim_ktype = {
1052 .sysfs_ops = &most_aim_sysfs_ops,
1053 .release = most_aim_release,
1054 .default_attrs = most_aim_def_attrs,
1057 static struct kset *most_aim_kset;
1060 * create_most_aim_obj - creates an AIM object
1061 * @name: name of the AIM
1063 * This creates an AIM object assigns the proper kset and registers
1065 * Returns a pointer to the object or NULL if something went wrong.
1067 static struct most_aim_obj *create_most_aim_obj(const char *name)
1069 struct most_aim_obj *most_aim;
1072 most_aim = kzalloc(sizeof(*most_aim), GFP_KERNEL);
1075 most_aim->kobj.kset = most_aim_kset;
1076 retval = kobject_init_and_add(&most_aim->kobj, &most_aim_ktype,
1079 kobject_put(&most_aim->kobj);
1082 kobject_uevent(&most_aim->kobj, KOBJ_ADD);
1087 * destroy_most_aim_obj - AIM release function
1088 * @p: pointer to AIM object
1090 * This decrements the reference counter of the AIM object. If the
1091 * reference count turns zero, its release function will be called.
1093 static void destroy_most_aim_obj(struct most_aim_obj *p)
1095 kobject_put(&p->kobj);
1103 * Instantiation of the MOST bus
1105 static struct bus_type most_bus = {
1110 * Instantiation of the core driver
1112 static struct device_driver mostcore = {
1117 static inline void trash_mbo(struct mbo *mbo)
1119 unsigned long flags;
1120 struct most_c_obj *c = mbo->context;
1122 spin_lock_irqsave(&c->fifo_lock, flags);
1123 list_add(&mbo->list, &c->trash_fifo);
1124 spin_unlock_irqrestore(&c->fifo_lock, flags);
1127 static struct mbo *get_hdm_mbo(struct most_c_obj *c)
1129 unsigned long flags;
1132 spin_lock_irqsave(&c->fifo_lock, flags);
1133 if (c->enqueue_halt || list_empty(&c->halt_fifo))
1136 mbo = list_pop_mbo(&c->halt_fifo);
1137 spin_unlock_irqrestore(&c->fifo_lock, flags);
1141 static void nq_hdm_mbo(struct mbo *mbo)
1143 unsigned long flags;
1144 struct most_c_obj *c = mbo->context;
1146 spin_lock_irqsave(&c->fifo_lock, flags);
1147 list_add_tail(&mbo->list, &c->halt_fifo);
1148 spin_unlock_irqrestore(&c->fifo_lock, flags);
1149 wake_up_interruptible(&c->hdm_fifo_wq);
1152 static int hdm_enqueue_thread(void *data)
1154 struct most_c_obj *c = data;
1156 typeof(c->iface->enqueue) enqueue = c->iface->enqueue;
1158 while (likely(!kthread_should_stop())) {
1159 wait_event_interruptible(c->hdm_fifo_wq,
1160 (mbo = get_hdm_mbo(c)) ||
1161 kthread_should_stop());
1166 if (c->cfg.direction == MOST_CH_RX)
1167 mbo->buffer_length = c->cfg.buffer_size;
1169 if (unlikely(enqueue(mbo->ifp, mbo->hdm_channel_id, mbo))) {
1170 pr_err("hdm enqueue failed\n");
1172 c->hdm_enqueue_task = NULL;
1180 static int run_enqueue_thread(struct most_c_obj *c, int channel_id)
1182 struct task_struct *task =
1183 kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
1187 return PTR_ERR(task);
1189 c->hdm_enqueue_task = task;
1194 * arm_mbo - recycle MBO for further usage
1195 * @mbo: buffer object
1197 * This puts an MBO back to the list to have it ready for up coming
1200 * In case the MBO belongs to a channel that recently has been
1201 * poisoned, the MBO is scheduled to be trashed.
1202 * Calls the completion handler of an attached AIM.
1204 static void arm_mbo(struct mbo *mbo)
1206 unsigned long flags;
1207 struct most_c_obj *c;
1209 BUG_ON((!mbo) || (!mbo->context));
1212 if (c->is_poisoned) {
1217 spin_lock_irqsave(&c->fifo_lock, flags);
1218 ++*mbo->num_buffers_ptr;
1219 list_add_tail(&mbo->list, &c->fifo);
1220 spin_unlock_irqrestore(&c->fifo_lock, flags);
1222 if (c->aim0.refs && c->aim0.ptr->tx_completion)
1223 c->aim0.ptr->tx_completion(c->iface, c->channel_id);
1225 if (c->aim1.refs && c->aim1.ptr->tx_completion)
1226 c->aim1.ptr->tx_completion(c->iface, c->channel_id);
1230 * arm_mbo_chain - helper function that arms an MBO chain for the HDM
1231 * @c: pointer to interface channel
1232 * @dir: direction of the channel
1233 * @compl: pointer to completion function
1235 * This allocates buffer objects including the containing DMA coherent
1236 * buffer and puts them in the fifo.
1237 * Buffers of Rx channels are put in the kthread fifo, hence immediately
1238 * submitted to the HDM.
1240 * Returns the number of allocated and enqueued MBOs.
1242 static int arm_mbo_chain(struct most_c_obj *c, int dir,
1243 void (*compl)(struct mbo *))
1248 u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
1250 atomic_set(&c->mbo_nq_level, 0);
1252 for (i = 0; i < c->cfg.num_buffers; i++) {
1253 mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
1255 pr_info("WARN: Allocation of MBO failed.\n");
1260 mbo->ifp = c->iface;
1261 mbo->hdm_channel_id = c->channel_id;
1262 mbo->virt_address = dma_alloc_coherent(NULL,
1266 if (!mbo->virt_address) {
1267 pr_info("WARN: No DMA coherent buffer.\n");
1271 mbo->complete = compl;
1272 mbo->num_buffers_ptr = &dummy_num_buffers;
1273 if (dir == MOST_CH_RX) {
1275 atomic_inc(&c->mbo_nq_level);
1289 * most_submit_mbo - submits an MBO to fifo
1290 * @mbo: pointer to the MBO
1293 int most_submit_mbo(struct mbo *mbo)
1295 if (unlikely((!mbo) || (!mbo->context))) {
1296 pr_err("Bad MBO or missing channel reference\n");
1303 EXPORT_SYMBOL_GPL(most_submit_mbo);
1306 * most_write_completion - write completion handler
1307 * @mbo: pointer to MBO
1309 * This recycles the MBO for further usage. In case the channel has been
1310 * poisoned, the MBO is scheduled to be trashed.
1312 static void most_write_completion(struct mbo *mbo)
1314 struct most_c_obj *c;
1316 BUG_ON((!mbo) || (!mbo->context));
1319 if (mbo->status == MBO_E_INVAL)
1320 pr_info("WARN: Tx MBO status: invalid\n");
1321 if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
1328 * get_channel_by_iface - get pointer to channel object
1329 * @iface: pointer to interface instance
1332 * This retrieves a pointer to a channel of the given interface and channel ID.
1335 most_c_obj *get_channel_by_iface(struct most_interface *iface, int id)
1337 struct most_inst_obj *i;
1339 if (unlikely(!iface)) {
1340 pr_err("Bad interface\n");
1343 if (unlikely((id < 0) || (id >= iface->num_channels))) {
1344 pr_err("Channel index (%d) out of range\n", id);
1349 pr_err("interface is not registered\n");
1352 return i->channel[id];
1355 int channel_has_mbo(struct most_interface *iface, int id, struct most_aim *aim)
1357 struct most_c_obj *c = get_channel_by_iface(iface, id);
1358 unsigned long flags;
1364 if (c->aim0.refs && c->aim1.refs &&
1365 ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
1366 (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
1369 spin_lock_irqsave(&c->fifo_lock, flags);
1370 empty = list_empty(&c->fifo);
1371 spin_unlock_irqrestore(&c->fifo_lock, flags);
1374 EXPORT_SYMBOL_GPL(channel_has_mbo);
/*
 * NOTE(review): fragmentary extract -- NULL checks, the early returns and
 * the decrement of *num_buffers_ptr are missing from view.
 */
1377 * most_get_mbo - get pointer to an MBO of pool
1378 * @iface: pointer to interface instance
1381 * This attempts to get a free buffer out of the channel fifo.
1382 * Returns a pointer to MBO on success or NULL otherwise.
1384 struct mbo *most_get_mbo(struct most_interface *iface, int id,
1385 struct most_aim *aim)
1388 struct most_c_obj *c;
1389 unsigned long flags;
1390 int *num_buffers_ptr;
1392 c = get_channel_by_iface(iface, id);
/*
 * With both AIMs attached, refuse the request if the calling AIM has
 * exhausted its private buffer budget (same test as channel_has_mbo()).
 */
1396 if (c->aim0.refs && c->aim1.refs &&
1397 ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
1398 (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
/*
 * Select which per-AIM counter this MBO will be accounted against;
 * unknown callers are charged to a dummy counter so the MBO path
 * never dereferences a NULL counter pointer.
 */
1401 if (aim == c->aim0.ptr)
1402 num_buffers_ptr = &c->aim0.num_buffers;
1403 else if (aim == c->aim1.ptr)
1404 num_buffers_ptr = &c->aim1.num_buffers;
1406 num_buffers_ptr = &dummy_num_buffers;
/* Pop the first free MBO from the FIFO under the fifo spinlock. */
1408 spin_lock_irqsave(&c->fifo_lock, flags);
1409 if (list_empty(&c->fifo)) {
1410 spin_unlock_irqrestore(&c->fifo_lock, flags);
1413 mbo = list_pop_mbo(&c->fifo);
1415 spin_unlock_irqrestore(&c->fifo_lock, flags);
/* Hand the accounting pointer and the configured size to the caller. */
1417 mbo->num_buffers_ptr = num_buffers_ptr;
1418 mbo->buffer_length = c->cfg.buffer_size;
1421 EXPORT_SYMBOL_GPL(most_get_mbo);
/*
 * NOTE(review): fragmentary extract -- the TX branch body (re-enqueue via
 * the HDM) and the RX arm/nq_aim path between lines 1431 and 1436 are not
 * visible here.
 */
1424 * most_put_mbo - return buffer to pool
1425 * @mbo: buffer object
1427 void most_put_mbo(struct mbo *mbo)
1429 struct most_c_obj *c = mbo->context;
/* TX buffers take a different return path than RX buffers. */
1431 if (c->cfg.direction == MOST_CH_TX) {
/* Account one more buffer queued towards the hardware (RX path). */
1436 atomic_inc(&c->mbo_nq_level);
1438 EXPORT_SYMBOL_GPL(most_put_mbo);
/*
 * NOTE(review): fragmentary extract -- the trash/return statements inside
 * each branch and the final fall-through handling (when no AIM consumed
 * the MBO) are missing from view.
 */
1441 * most_read_completion - read completion handler
1442 * @mbo: pointer to MBO
1444 * This function is called by the HDM when data has been received from the
1445 * hardware and copied to the buffer of the MBO.
1447 * In case the channel has been poisoned it puts the buffer in the trash queue.
1448 * Otherwise, it passes the buffer to an AIM for further processing.
1450 static void most_read_completion(struct mbo *mbo)
1452 struct most_c_obj *c = mbo->context;
/* Poisoned channel or closing status: the buffer must not reach an AIM. */
1454 if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
/* Invalid transfer: recycle the buffer instead of delivering it. */
1459 if (mbo->status == MBO_E_INVAL) {
1461 atomic_inc(&c->mbo_nq_level);
/* Warn once the hardware-side buffer level drops to zero. */
1465 if (atomic_sub_and_test(1, &c->mbo_nq_level)) {
1466 pr_info("WARN: rx device out of buffers\n");
/*
 * Offer the MBO to aim0 first, then aim1; a 0 return from
 * rx_completion() means the AIM took ownership of the buffer.
 */
1470 if (c->aim0.refs && c->aim0.ptr->rx_completion &&
1471 c->aim0.ptr->rx_completion(mbo) == 0)
1474 if (c->aim1.refs && c->aim1.ptr->rx_completion &&
1475 c->aim1.ptr->rx_completion(mbo) == 0)
/*
 * NOTE(review): fragmentary extract -- the refs increments after lines 1542
 * and 1544, the "out:" label, several error-path gotos and the success
 * return are not visible here.
 */
1482 * most_start_channel - prepares a channel for communication
1483 * @iface: pointer to interface instance
1486 * This prepares the channel for usage. Cross-checks whether the
1487 * channel's been properly configured.
1489 * Returns 0 on success or error code otherwise.
1491 int most_start_channel(struct most_interface *iface, int id,
1492 struct most_aim *aim)
1496 struct most_c_obj *c = get_channel_by_iface(iface, id);
/* start_mutex serializes start/stop of this channel. */
1501 mutex_lock(&c->start_mutex);
1502 if (c->aim0.refs + c->aim1.refs > 0)
1503 goto out; /* already started by other aim */
/* Pin the HDM module so it cannot be unloaded while the channel runs. */
1505 if (!try_module_get(iface->mod)) {
1506 pr_info("failed to acquire HDM lock\n");
1507 mutex_unlock(&c->start_mutex);
/* Let the HDM validate and apply the current channel configuration. */
1511 c->cfg.extra_len = 0;
1512 if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
1513 pr_info("channel configuration failed. Go check settings...\n");
1518 init_waitqueue_head(&c->hdm_fifo_wq);
/* Pre-arm the MBO pool with the direction-matching completion handler. */
1520 if (c->cfg.direction == MOST_CH_RX)
1521 num_buffer = arm_mbo_chain(c, c->cfg.direction,
1522 most_read_completion);
1524 num_buffer = arm_mbo_chain(c, c->cfg.direction,
1525 most_write_completion);
1526 if (unlikely(!num_buffer)) {
1527 pr_info("failed to allocate memory\n");
/* Spawn the per-channel enqueue kthread. */
1532 ret = run_enqueue_thread(c, id);
/* Split the buffer budget between the two possible AIMs. */
1537 c->aim0.num_buffers = c->cfg.num_buffers / 2;
1538 c->aim1.num_buffers = c->cfg.num_buffers - c->aim0.num_buffers;
1539 atomic_set(&c->mbo_ref, num_buffer);
/* presumably increments the matching AIM's refcount here -- lines hidden */
1542 if (aim == c->aim0.ptr)
1544 if (aim == c->aim1.ptr)
1546 mutex_unlock(&c->start_mutex);
/* Error path: drop the module reference taken above. */
1550 module_put(iface->mod);
1551 mutex_unlock(&c->start_mutex);
1554 EXPORT_SYMBOL_GPL(most_start_channel);
/*
 * NOTE(review): fragmentary extract -- the early returns, the refs
 * decrements after lines 1607/1609 and the final return are not visible.
 */
1557 * most_stop_channel - stops a running channel
1558 * @iface: pointer to interface instance
1561 int most_stop_channel(struct most_interface *iface, int id,
1562 struct most_aim *aim)
1564 struct most_c_obj *c;
1566 if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
1567 pr_err("Bad interface or index out of range\n");
1570 c = get_channel_by_iface(iface, id);
1574 mutex_lock(&c->start_mutex);
/* Another AIM still uses the channel: only drop this AIM's reference. */
1575 if (c->aim0.refs + c->aim1.refs >= 2)
/* Last user: stop the enqueue kthread before poisoning the channel. */
1578 if (c->hdm_enqueue_task)
1579 kthread_stop(c->hdm_enqueue_task);
1580 c->hdm_enqueue_task = NULL;
1583 module_put(iface->mod);
/* Mark the channel poisoned so completions trash instead of deliver. */
1585 c->is_poisoned = true;
1586 if (c->iface->poison_channel(c->iface, c->channel_id)) {
1587 pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
1588 c->iface->description);
1589 mutex_unlock(&c->start_mutex);
/* Drain all buffer FIFOs belonging to this channel. */
1592 flush_trash_fifo(c);
1593 flush_channel_fifos(c);
/* Wait for all in-flight MBOs to be returned by the HDM. */
1595 #ifdef CMPL_INTERRUPTIBLE
1596 if (wait_for_completion_interruptible(&c->cleanup)) {
1597 pr_info("Interrupted while clean up ch %d\n", c->channel_id);
1598 mutex_unlock(&c->start_mutex);
1602 wait_for_completion(&c->cleanup);
1604 c->is_poisoned = false;
/* presumably decrements the matching AIM's refcount -- lines hidden */
1607 if (aim == c->aim0.ptr)
1609 if (aim == c->aim1.ptr)
1611 mutex_unlock(&c->start_mutex);
1614 EXPORT_SYMBOL_GPL(most_stop_channel);
/*
 * NOTE(review): fragmentary extract -- the NULL check on @aim, the
 * error returns and the final "return 0" are not visible here.
 */
1617 * most_register_aim - registers an AIM (driver) with the core
1618 * @aim: instance of AIM to be registered
1620 int most_register_aim(struct most_aim *aim)
1622 struct most_aim_obj *aim_obj;
1625 pr_err("Bad driver\n");
/* Create the sysfs object representing this AIM. */
1628 aim_obj = create_most_aim_obj(aim->name);
1630 pr_info("failed to alloc driver object\n");
/* Link AIM and its sysfs object in both directions. */
1633 aim_obj->driver = aim;
1634 aim->context = aim_obj;
1635 pr_info("registered new application interfacing module %s\n",
1637 list_add_tail(&aim_obj->list, &aim_list);
1640 EXPORT_SYMBOL_GPL(most_register_aim);
/*
 * NOTE(review): fragmentary extract -- the NULL checks, the aimX.ptr
 * reset statements after lines 1667/1669 and the final return are not
 * visible here.
 */
1643 * most_deregister_aim - deregisters an AIM (driver) with the core
1644 * @aim: AIM to be removed
1646 int most_deregister_aim(struct most_aim *aim)
1648 struct most_aim_obj *aim_obj;
1649 struct most_c_obj *c, *tmp;
1650 struct most_inst_obj *i, *i_tmp;
1653 pr_err("Bad driver\n");
1657 aim_obj = aim->context;
1659 pr_info("driver not registered.\n");
/*
 * Disconnect this AIM from every channel of every registered
 * interface instance before tearing down its sysfs object.
 */
1662 list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
1663 list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
1664 if (c->aim0.ptr == aim || c->aim1.ptr == aim)
1665 aim->disconnect_channel(
1666 c->iface, c->channel_id);
/* presumably clears c->aimX.ptr here -- lines hidden */
1667 if (c->aim0.ptr == aim)
1669 if (c->aim1.ptr == aim)
1673 list_del(&aim_obj->list);
1674 destroy_most_aim_obj(aim_obj);
1675 pr_info("deregistering application interfacing module %s\n", aim->name);
1678 EXPORT_SYMBOL_GPL(most_deregister_aim);
/*
 * NOTE(review): fragmentary extract -- the ida error branch, iface->priv
 * assignment, the error goto inside the loop, the success return and the
 * free_instance label are not visible here.
 */
1681 * most_register_interface - registers an interface with core
1682 * @iface: pointer to the instance of the interface description.
1684 * Allocates and initializes a new interface instance and all of its channels.
1685 * Returns a pointer to kobject or an error pointer.
1687 struct kobject *most_register_interface(struct most_interface *iface)
1691 char name[STRING_SIZE];
1692 char channel_name[STRING_SIZE];
1693 struct most_c_obj *c;
1694 struct most_inst_obj *inst;
/* Reject interfaces lacking mandatory callbacks or with too many channels. */
1696 if (!iface || !iface->enqueue || !iface->configure ||
1697 !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
1698 pr_err("Bad interface or channel overflow\n");
1699 return ERR_PTR(-EINVAL);
/* Allocate a unique device id for the "mdevN" name. */
1702 id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
1704 pr_info("Failed to alloc mdev ID\n");
1707 snprintf(name, STRING_SIZE, "mdev%d", id);
1709 inst = create_most_inst_obj(name);
1711 pr_info("Failed to allocate interface instance\n");
1712 ida_simple_remove(&mdev_id, id);
1713 return ERR_PTR(-ENOMEM);
1717 INIT_LIST_HEAD(&inst->channel_list);
1718 inst->iface = iface;
1720 list_add_tail(&inst->list, &instance_list);
/* Create one channel object per hardware channel. */
1722 for (i = 0; i < iface->num_channels; i++) {
1723 const char *name_suffix = iface->channel_vector[i].name_suffix;
/*
 * Channel naming: default "chN"; "@"-prefixed suffix is appended
 * to "chN"; any other suffix is used verbatim.
 */
1726 snprintf(channel_name, STRING_SIZE, "ch%d", i);
1727 else if (name_suffix[0] == '@')
1728 snprintf(channel_name, STRING_SIZE, "ch%d%s", i,
1731 snprintf(channel_name, STRING_SIZE, "%s", name_suffix);
1733 /* this increments the reference count of this instance */
1734 c = create_most_c_obj(channel_name, &inst->kobj);
1737 inst->channel[i] = c;
/* Initialize the channel to a clean, unconfigured state. */
1742 c->keep_mbo = false;
1743 c->enqueue_halt = false;
1744 c->is_poisoned = false;
1745 c->cfg.direction = 0;
1746 c->cfg.data_type = 0;
1747 c->cfg.num_buffers = 0;
1748 c->cfg.buffer_size = 0;
1749 c->cfg.subbuffer_size = 0;
1750 c->cfg.packets_per_xact = 0;
1751 spin_lock_init(&c->fifo_lock);
1752 INIT_LIST_HEAD(&c->fifo);
1753 INIT_LIST_HEAD(&c->trash_fifo);
1754 INIT_LIST_HEAD(&c->halt_fifo);
1755 init_completion(&c->cleanup);
1756 atomic_set(&c->mbo_ref, 0);
1757 mutex_init(&c->start_mutex);
1758 list_add_tail(&c->list, &inst->channel_list);
1760 pr_info("registered new MOST device mdev%d (%s)\n",
1761 inst->dev_id, iface->description);
/* Error path: undo id allocation and instance creation. */
1765 pr_info("Failed allocate channel(s)\n");
1766 list_del(&inst->list);
1767 ida_simple_remove(&mdev_id, id);
1768 destroy_most_inst_obj(inst);
1769 return ERR_PTR(-ENOMEM);
1771 EXPORT_SYMBOL_GPL(most_register_interface);
/*
 * NOTE(review): fragmentary extract -- the NULL check on @i, the aimX.refs
 * guards around the disconnect_channel() calls and the list_del of the
 * instance are not visible here.
 */
1774 * most_deregister_interface - deregisters an interface with core
1775 * @iface: pointer to the interface instance description.
1777 * Before removing an interface instance from the list, all running
1778 * channels are stopped and poisoned.
1780 void most_deregister_interface(struct most_interface *iface)
1782 struct most_inst_obj *i = iface->priv;
1783 struct most_c_obj *c;
1786 pr_info("Bad Interface\n");
1789 pr_info("deregistering MOST device %s (%s)\n", i->kobj.name,
1790 iface->description);
/* Detach any connected AIMs from every channel of this instance. */
1792 list_for_each_entry(c, &i->channel_list, list) {
1794 c->aim0.ptr->disconnect_channel(c->iface,
1797 c->aim1.ptr->disconnect_channel(c->iface,
/* Release the mdev id and the instance's sysfs object. */
1803 ida_simple_remove(&mdev_id, i->dev_id);
1805 destroy_most_inst_obj(i);
1807 EXPORT_SYMBOL_GPL(most_deregister_interface);
/*
 * NOTE(review): fragmentary extract -- the NULL check on @c is missing
 * from view.  The flag set here is read by the enqueue kthread; it is not
 * obviously protected by a lock -- presumably relies on the wait-queue
 * synchronization, TODO confirm.
 */
1810 * most_stop_enqueue - prevents core from enqueueing MBOs
1811 * @iface: pointer to interface
1814 * This is called by an HDM that _cannot_ attend to its duties and
1815 * is imminent to get run over by the core. The core is not going to
1816 * enqueue any further packets unless the flagging HDM calls
1817 * most_resume enqueue().
1819 void most_stop_enqueue(struct most_interface *iface, int id)
1821 struct most_c_obj *c = get_channel_by_iface(iface, id);
1824 c->enqueue_halt = true;
1826 EXPORT_SYMBOL_GPL(most_stop_enqueue);
/*
 * NOTE(review): fragmentary extract -- the NULL check on @c and the
 * halt-fifo flush between lines 1842 and 1844 are not visible here.
 */
1829 * most_resume_enqueue - allow core to enqueue MBOs again
1830 * @iface: pointer to interface
1833 * This clears the enqueue halt flag and enqueues all MBOs currently
1834 * sitting in the wait fifo.
1836 void most_resume_enqueue(struct most_interface *iface, int id)
1838 struct most_c_obj *c = get_channel_by_iface(iface, id);
1842 c->enqueue_halt = false;
/* Kick the enqueue kthread waiting on the HDM fifo wait queue. */
1844 wake_up_interruptible(&c->hdm_fifo_wq);
1846 EXPORT_SYMBOL_GPL(most_resume_enqueue);
/*
 * most_init - module init: set up bus, class, driver and sysfs ksets.
 *
 * NOTE(review): fragmentary extract -- the assignments receiving
 * device_create()/kset_create_and_add() results, several goto targets,
 * the success return and the final error return are not visible here.
 */
1848 static int __init most_init(void)
1850 pr_info("init()\n");
1851 INIT_LIST_HEAD(&instance_list);
1852 INIT_LIST_HEAD(&aim_list);
/* Register the MOST bus type first; everything else hangs off it. */
1855 if (bus_register(&most_bus)) {
1856 pr_info("Cannot register most bus\n");
1860 most_class = class_create(THIS_MODULE, "most");
1861 if (IS_ERR(most_class)) {
1862 pr_info("No udev support.\n");
1865 if (driver_register(&mostcore)) {
1866 pr_info("Cannot register core driver\n");
/* Glue device anchoring the sysfs hierarchy under the "most" class. */
1871 device_create(most_class, NULL, 0, NULL, "mostcore");
1872 if (!class_glue_dir)
/* ksets holding the AIM and device kobjects below the glue device. */
1876 kset_create_and_add("aims", NULL, &class_glue_dir->kobj);
1878 goto exit_class_container;
1881 kset_create_and_add("devices", NULL, &class_glue_dir->kobj);
1882 if (!most_inst_kset)
1883 goto exit_driver_kset;
/* Error unwinding in reverse order of the registrations above. */
1888 kset_unregister(most_aim_kset);
1889 exit_class_container:
1890 device_destroy(most_class, 0);
1892 driver_unregister(&mostcore);
1894 class_destroy(most_class);
1896 bus_unregister(&most_bus);
/*
 * most_exit - module exit: destroy all AIM and instance objects, then
 * tear down the sysfs/bus/class infrastructure in reverse init order.
 * NOTE(review): fragmentary extract -- the list_del calls inside the
 * iteration bodies are not visible here.
 */
1901 static void __exit most_exit(void)
1903 struct most_inst_obj *i, *i_tmp;
1904 struct most_aim_obj *d, *d_tmp;
1906 pr_info("exit core module\n");
/* Destroy every registered AIM object. */
1907 list_for_each_entry_safe(d, d_tmp, &aim_list, list) {
1908 destroy_most_aim_obj(d);
/* Destroy every registered interface instance. */
1911 list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
1913 destroy_most_inst_obj(i);
/* Unwind the init sequence: ksets, glue device, driver, class, bus, ida. */
1915 kset_unregister(most_inst_kset);
1916 kset_unregister(most_aim_kset);
1917 device_destroy(most_class, 0);
1918 driver_unregister(&mostcore);
1919 class_destroy(most_class);
1920 bus_unregister(&most_bus);
1921 ida_destroy(&mdev_id);
/* Module entry/exit hookup and metadata. */
1924 module_init(most_init);
1925 module_exit(most_exit);
1926 MODULE_LICENSE("GPL");
1927 MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
1928 MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");