recipes-kernel: add MOST driver kernel modules
AGL/meta-agl-demo.git: recipes-kernel/mostcore/files/core.c
1 /*
2  * core.c - Implementation of core module of MOST Linux driver stack
3  *
4  * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
5  *
6  * This program is distributed in the hope that it will be useful,
7  * but WITHOUT ANY WARRANTY; without even the implied warranty of
8  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
9  * GNU General Public License for more details.
10  *
11  * This file is licensed under GPLv2.
12  */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/module.h>
16 #include <linux/fs.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/device.h>
20 #include <linux/list.h>
21 #include <linux/poll.h>
22 #include <linux/wait.h>
23 #include <linux/kobject.h>
24 #include <linux/mutex.h>
25 #include <linux/completion.h>
26 #include <linux/sysfs.h>
27 #include <linux/kthread.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/idr.h>
30 #include "mostcore.h"
31
32 #define MAX_CHANNELS    64
33 #define STRING_SIZE     80
34
35 static struct class *most_class;
36 static struct device *class_glue_dir;
37 static struct ida mdev_id;
38 static int dummy_num_buffers;
39
40 struct most_c_aim_obj {
41         struct most_aim *ptr;
42         int refs;
43         int num_buffers;
44 };
45
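/*
 * Per-channel bookkeeping. The three list heads below are all protected
 * by fifo_lock: fifo holds buffers that are ready to be handed to an
 * AIM, halt_fifo holds buffers waiting to be passed to the HDM by the
 * enqueue thread, and trash_fifo collects buffers of a poisoned channel
 * until they are finally released.
 */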
46 struct most_c_obj {
47         struct kobject kobj;
48         struct completion cleanup;
49         atomic_t mbo_ref;
50         atomic_t mbo_nq_level;
51         u16 channel_id;
52         bool is_poisoned;
53         struct mutex start_mutex;
54         int is_starving;
55         struct most_interface *iface;
56         struct most_inst_obj *inst;
57         struct most_channel_config cfg;
58         bool keep_mbo;
59         bool enqueue_halt;
60         struct list_head fifo;
61         spinlock_t fifo_lock;
62         struct list_head halt_fifo;
63         struct list_head list;
64         struct most_c_aim_obj aim0;
65         struct most_c_aim_obj aim1;
66         struct list_head trash_fifo;
67         struct task_struct *hdm_enqueue_task;
68         wait_queue_head_t hdm_fifo_wq;
69 };
70
71 #define to_c_obj(d) container_of(d, struct most_c_obj, kobj)
72
73 struct most_inst_obj {
74         int dev_id;
75         struct most_interface *iface;
76         struct list_head channel_list;
77         struct most_c_obj *channel[MAX_CHANNELS];
78         struct kobject kobj;
79         struct list_head list;
80 };
81
82 #define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj)
83
84 /**
85  * list_pop_mbo - retrieves the first MBO of the list and removes it
86  * @ptr: the list head to grab the MBO from.
87  */
88 #define list_pop_mbo(ptr)                                               \
89 ({                                                                      \
90         struct mbo *_mbo = list_first_entry(ptr, struct mbo, list);     \
91         list_del(&_mbo->list);                                          \
92         _mbo;                                                           \
93 })
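/*
 * list_pop_mbo() does no locking or emptiness check of its own; callers
 * hold the channel's fifo_lock and verify the list is not empty first.
 */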
94
95 /*                   ___             ___
96  *                   ___C H A N N E L___
97  */
98
99 /**
100  * struct most_c_attr - to access the attributes of a channel object
101  * @attr: attributes of a channel
102  * @show: pointer to the show function
103  * @store: pointer to the store function
104  */
105 struct most_c_attr {
106         struct attribute attr;
107         ssize_t (*show)(struct most_c_obj *d,
108                         struct most_c_attr *attr,
109                         char *buf);
110         ssize_t (*store)(struct most_c_obj *d,
111                          struct most_c_attr *attr,
112                          const char *buf,
113                          size_t count);
114 };
115
116 #define to_channel_attr(a) container_of(a, struct most_c_attr, attr)
117
118 #define MOST_CHNL_ATTR(_name, _mode, _show, _store) \
119                 struct most_c_attr most_chnl_attr_##_name = \
120                 __ATTR(_name, _mode, _show, _store)
121
122 /**
123  * channel_attr_show - show function of channel object
124  * @kobj: pointer to its kobject
125  * @attr: pointer to its attributes
126  * @buf: buffer
127  */
128 static ssize_t channel_attr_show(struct kobject *kobj, struct attribute *attr,
129                                  char *buf)
130 {
131         struct most_c_attr *channel_attr = to_channel_attr(attr);
132         struct most_c_obj *c_obj = to_c_obj(kobj);
133
134         if (!channel_attr->show)
135                 return -EIO;
136
137         return channel_attr->show(c_obj, channel_attr, buf);
138 }
139
140 /**
141  * channel_attr_store - store function of channel object
142  * @kobj: pointer to its kobject
143  * @attr: pointer to its attributes
144  * @buf: buffer
145  * @len: length of buffer
146  */
147 static ssize_t channel_attr_store(struct kobject *kobj,
148                                   struct attribute *attr,
149                                   const char *buf,
150                                   size_t len)
151 {
152         struct most_c_attr *channel_attr = to_channel_attr(attr);
153         struct most_c_obj *c_obj = to_c_obj(kobj);
154
155         if (!channel_attr->store)
156                 return -EIO;
157         return channel_attr->store(c_obj, channel_attr, buf, len);
158 }
159
160 static const struct sysfs_ops most_channel_sysfs_ops = {
161         .show = channel_attr_show,
162         .store = channel_attr_store,
163 };
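/*
 * Every read or write of a channel attribute in sysfs goes through the
 * two wrappers above: they recover the typed channel and attribute
 * objects from the generic kobject/attribute pointers and dispatch to
 * the attribute's own show()/store() callback.
 */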
164
165 /**
166  * most_free_mbo_coherent - free an MBO and its coherent buffer
167  * @mbo: buffer to be released
168  *
169  */
170 static void most_free_mbo_coherent(struct mbo *mbo)
171 {
172         struct most_c_obj *c = mbo->context;
173         u32 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
174
175         dma_free_coherent(NULL, coherent_buf_size, mbo->virt_address,
176                           mbo->bus_address);
177         kfree(mbo);
178         if (atomic_sub_and_test(1, &c->mbo_ref))
179                 complete(&c->cleanup);
180 }
181
182 /**
183  * flush_channel_fifos - clear the channel fifos
184  * @c: pointer to channel object
185  */
186 static void flush_channel_fifos(struct most_c_obj *c)
187 {
188         unsigned long flags, hf_flags;
189         struct mbo *mbo, *tmp;
190
191         if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
192                 return;
193
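        /*
         * The fifo_lock is dropped around each call to
         * most_free_mbo_coherent() so that the DMA coherent buffer is
         * not released while holding the spinlock with interrupts off.
         */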
194         spin_lock_irqsave(&c->fifo_lock, flags);
195         list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
196                 list_del(&mbo->list);
197                 spin_unlock_irqrestore(&c->fifo_lock, flags);
198                 most_free_mbo_coherent(mbo);
199                 spin_lock_irqsave(&c->fifo_lock, flags);
200         }
201         spin_unlock_irqrestore(&c->fifo_lock, flags);
202
203         spin_lock_irqsave(&c->fifo_lock, hf_flags);
204         list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
205                 list_del(&mbo->list);
206                 spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
207                 most_free_mbo_coherent(mbo);
208                 spin_lock_irqsave(&c->fifo_lock, hf_flags);
209         }
210         spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
211
212         if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
213                 pr_info("WARN: fifo | halt fifo not empty\n");
214 }
215
216 /**
217  * flush_trash_fifo - clear the trash fifo
218  * @c: pointer to channel object
219  */
220 static int flush_trash_fifo(struct most_c_obj *c)
221 {
222         struct mbo *mbo, *tmp;
223         unsigned long flags;
224
225         spin_lock_irqsave(&c->fifo_lock, flags);
226         list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
227                 list_del(&mbo->list);
228                 spin_unlock_irqrestore(&c->fifo_lock, flags);
229                 most_free_mbo_coherent(mbo);
230                 spin_lock_irqsave(&c->fifo_lock, flags);
231         }
232         spin_unlock_irqrestore(&c->fifo_lock, flags);
233         return 0;
234 }
235
236 /**
237  * most_channel_release - release function of channel object
238  * @kobj: pointer to channel's kobject
239  */
240 static void most_channel_release(struct kobject *kobj)
241 {
242         struct most_c_obj *c = to_c_obj(kobj);
243
244         kfree(c);
245 }
246
247 static ssize_t show_available_directions(struct most_c_obj *c,
248                                          struct most_c_attr *attr,
249                                          char *buf)
250 {
251         unsigned int i = c->channel_id;
252
253         strcpy(buf, "");
254         if (c->iface->channel_vector[i].direction & MOST_CH_RX)
255                 strcat(buf, "dir_rx ");
256         if (c->iface->channel_vector[i].direction & MOST_CH_TX)
257                 strcat(buf, "dir_tx ");
258         strcat(buf, "\n");
259         return strlen(buf);
260 }
261
262 static ssize_t show_available_datatypes(struct most_c_obj *c,
263                                         struct most_c_attr *attr,
264                                         char *buf)
265 {
266         unsigned int i = c->channel_id;
267
268         strcpy(buf, "");
269         if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
270                 strcat(buf, "control ");
271         if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
272                 strcat(buf, "async ");
273         if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
274                 strcat(buf, "sync ");
275         if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC_AVP)
276                 strcat(buf, "isoc_avp ");
277         strcat(buf, "\n");
278         return strlen(buf);
279 }
280
281 static
282 ssize_t show_number_of_packet_buffers(struct most_c_obj *c,
283                                       struct most_c_attr *attr,
284                                       char *buf)
285 {
286         unsigned int i = c->channel_id;
287
288         return snprintf(buf, PAGE_SIZE, "%d\n",
289                         c->iface->channel_vector[i].num_buffers_packet);
290 }
291
292 static
293 ssize_t show_number_of_stream_buffers(struct most_c_obj *c,
294                                       struct most_c_attr *attr,
295                                       char *buf)
296 {
297         unsigned int i = c->channel_id;
298
299         return snprintf(buf, PAGE_SIZE, "%d\n",
300                         c->iface->channel_vector[i].num_buffers_streaming);
301 }
302
303 static
304 ssize_t show_size_of_packet_buffer(struct most_c_obj *c,
305                                    struct most_c_attr *attr,
306                                    char *buf)
307 {
308         unsigned int i = c->channel_id;
309
310         return snprintf(buf, PAGE_SIZE, "%d\n",
311                         c->iface->channel_vector[i].buffer_size_packet);
312 }
313
314 static
315 ssize_t show_size_of_stream_buffer(struct most_c_obj *c,
316                                    struct most_c_attr *attr,
317                                    char *buf)
318 {
319         unsigned int i = c->channel_id;
320
321         return snprintf(buf, PAGE_SIZE, "%d\n",
322                         c->iface->channel_vector[i].buffer_size_streaming);
323 }
324
325 static ssize_t show_channel_starving(struct most_c_obj *c,
326                                      struct most_c_attr *attr,
327                                      char *buf)
328 {
329         return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
330 }
331
332 #define create_show_channel_attribute(val) \
333         static MOST_CHNL_ATTR(val, S_IRUGO, show_##val, NULL)
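/*
 * For instance, create_show_channel_attribute(channel_starving) expands to
 *
 *   static struct most_c_attr most_chnl_attr_channel_starving =
 *           __ATTR(channel_starving, S_IRUGO, show_channel_starving, NULL);
 */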
334
335 create_show_channel_attribute(available_directions);
336 create_show_channel_attribute(available_datatypes);
337 create_show_channel_attribute(number_of_packet_buffers);
338 create_show_channel_attribute(number_of_stream_buffers);
339 create_show_channel_attribute(size_of_stream_buffer);
340 create_show_channel_attribute(size_of_packet_buffer);
341 create_show_channel_attribute(channel_starving);
342
343 static ssize_t show_set_number_of_buffers(struct most_c_obj *c,
344                                           struct most_c_attr *attr,
345                                           char *buf)
346 {
347         return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
348 }
349
350 static ssize_t store_set_number_of_buffers(struct most_c_obj *c,
351                                            struct most_c_attr *attr,
352                                            const char *buf,
353                                            size_t count)
354 {
355         int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);
356
357         if (ret)
358                 return ret;
359         return count;
360 }
361
362 static ssize_t show_set_buffer_size(struct most_c_obj *c,
363                                     struct most_c_attr *attr,
364                                     char *buf)
365 {
366         return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
367 }
368
369 static ssize_t store_set_buffer_size(struct most_c_obj *c,
370                                      struct most_c_attr *attr,
371                                      const char *buf,
372                                      size_t count)
373 {
374         int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);
375
376         if (ret)
377                 return ret;
378         return count;
379 }
380
381 static ssize_t show_set_direction(struct most_c_obj *c,
382                                   struct most_c_attr *attr,
383                                   char *buf)
384 {
385         if (c->cfg.direction & MOST_CH_TX)
386                 return snprintf(buf, PAGE_SIZE, "dir_tx\n");
387         else if (c->cfg.direction & MOST_CH_RX)
388                 return snprintf(buf, PAGE_SIZE, "dir_rx\n");
389         return snprintf(buf, PAGE_SIZE, "unconfigured\n");
390 }
391
392 static ssize_t store_set_direction(struct most_c_obj *c,
393                                    struct most_c_attr *attr,
394                                    const char *buf,
395                                    size_t count)
396 {
397         if (!strcmp(buf, "dir_rx\n")) {
398                 c->cfg.direction = MOST_CH_RX;
399         } else if (!strcmp(buf, "dir_tx\n")) {
400                 c->cfg.direction = MOST_CH_TX;
401         } else {
402                 pr_info("WARN: invalid attribute settings\n");
403                 return -EINVAL;
404         }
405         return count;
406 }
407
408 static ssize_t show_set_datatype(struct most_c_obj *c,
409                                  struct most_c_attr *attr,
410                                  char *buf)
411 {
412         if (c->cfg.data_type & MOST_CH_CONTROL)
413                 return snprintf(buf, PAGE_SIZE, "control\n");
414         else if (c->cfg.data_type & MOST_CH_ASYNC)
415                 return snprintf(buf, PAGE_SIZE, "async\n");
416         else if (c->cfg.data_type & MOST_CH_SYNC)
417                 return snprintf(buf, PAGE_SIZE, "sync\n");
418         else if (c->cfg.data_type & MOST_CH_ISOC_AVP)
419                 return snprintf(buf, PAGE_SIZE, "isoc_avp\n");
420         return snprintf(buf, PAGE_SIZE, "unconfigured\n");
421 }
422
423 static ssize_t store_set_datatype(struct most_c_obj *c,
424                                   struct most_c_attr *attr,
425                                   const char *buf,
426                                   size_t count)
427 {
428         if (!strcmp(buf, "control\n")) {
429                 c->cfg.data_type = MOST_CH_CONTROL;
430         } else if (!strcmp(buf, "async\n")) {
431                 c->cfg.data_type = MOST_CH_ASYNC;
432         } else if (!strcmp(buf, "sync\n")) {
433                 c->cfg.data_type = MOST_CH_SYNC;
434         } else if (!strcmp(buf, "isoc_avp\n")) {
435                 c->cfg.data_type = MOST_CH_ISOC_AVP;
436         } else {
437                 pr_info("WARN: invalid attribute settings\n");
438                 return -EINVAL;
439         }
440         return count;
441 }
442
443 static ssize_t show_set_subbuffer_size(struct most_c_obj *c,
444                                        struct most_c_attr *attr,
445                                        char *buf)
446 {
447         return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
448 }
449
450 static ssize_t store_set_subbuffer_size(struct most_c_obj *c,
451                                         struct most_c_attr *attr,
452                                         const char *buf,
453                                         size_t count)
454 {
455         int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);
456
457         if (ret)
458                 return ret;
459         return count;
460 }
461
462 static ssize_t show_set_packets_per_xact(struct most_c_obj *c,
463                                          struct most_c_attr *attr,
464                                          char *buf)
465 {
466         return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
467 }
468
469 static ssize_t store_set_packets_per_xact(struct most_c_obj *c,
470                                           struct most_c_attr *attr,
471                                           const char *buf,
472                                           size_t count)
473 {
474         int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);
475
476         if (ret)
477                 return ret;
478         return count;
479 }
480
481 #define create_channel_attribute(value) \
482         static MOST_CHNL_ATTR(value, S_IRUGO | S_IWUSR, \
483                               show_##value, \
484                               store_##value)
485
486 create_channel_attribute(set_buffer_size);
487 create_channel_attribute(set_number_of_buffers);
488 create_channel_attribute(set_direction);
489 create_channel_attribute(set_datatype);
490 create_channel_attribute(set_subbuffer_size);
491 create_channel_attribute(set_packets_per_xact);
492
493 /**
494  * most_channel_def_attrs - array of default attributes of channel object
495  */
496 static struct attribute *most_channel_def_attrs[] = {
497         &most_chnl_attr_available_directions.attr,
498         &most_chnl_attr_available_datatypes.attr,
499         &most_chnl_attr_number_of_packet_buffers.attr,
500         &most_chnl_attr_number_of_stream_buffers.attr,
501         &most_chnl_attr_size_of_packet_buffer.attr,
502         &most_chnl_attr_size_of_stream_buffer.attr,
503         &most_chnl_attr_set_number_of_buffers.attr,
504         &most_chnl_attr_set_buffer_size.attr,
505         &most_chnl_attr_set_direction.attr,
506         &most_chnl_attr_set_datatype.attr,
507         &most_chnl_attr_set_subbuffer_size.attr,
508         &most_chnl_attr_set_packets_per_xact.attr,
509         &most_chnl_attr_channel_starving.attr,
510         NULL,
511 };
512
513 static struct kobj_type most_channel_ktype = {
514         .sysfs_ops = &most_channel_sysfs_ops,
515         .release = most_channel_release,
516         .default_attrs = most_channel_def_attrs,
517 };
518
519 static struct kset *most_channel_kset;
520
521 /**
522  * create_most_c_obj - allocates a channel object
523  * @name: name of the channel object
524  * @parent: parent kobject
525  *
526  * This creates a channel object and registers it with sysfs.
527  * Returns a pointer to the object or NULL when something went wrong.
528  */
529 static struct most_c_obj *
530 create_most_c_obj(const char *name, struct kobject *parent)
531 {
532         struct most_c_obj *c;
533         int retval;
534
535         c = kzalloc(sizeof(*c), GFP_KERNEL);
536         if (!c)
537                 return NULL;
538         c->kobj.kset = most_channel_kset;
539         retval = kobject_init_and_add(&c->kobj, &most_channel_ktype, parent,
540                                       "%s", name);
541         if (retval) {
542                 kobject_put(&c->kobj);
543                 return NULL;
544         }
545         kobject_uevent(&c->kobj, KOBJ_ADD);
546         return c;
547 }
548
549 /*                   ___               ___
550  *                   ___I N S T A N C E___
551  */
552 #define MOST_INST_ATTR(_name, _mode, _show, _store) \
553                 struct most_inst_attribute most_inst_attr_##_name = \
554                 __ATTR(_name, _mode, _show, _store)
555
556 static struct list_head instance_list;
557
558 /**
559  * struct most_inst_attribute - to access the attributes of instance object
560  * @attr: attributes of an instance
561  * @show: pointer to the show function
562  * @store: pointer to the store function
563  */
564 struct most_inst_attribute {
565         struct attribute attr;
566         ssize_t (*show)(struct most_inst_obj *d,
567                         struct most_inst_attribute *attr,
568                         char *buf);
569         ssize_t (*store)(struct most_inst_obj *d,
570                          struct most_inst_attribute *attr,
571                          const char *buf,
572                          size_t count);
573 };
574
575 #define to_instance_attr(a) \
576         container_of(a, struct most_inst_attribute, attr)
577
578 /**
579  * instance_attr_show - show function for an instance object
580  * @kobj: pointer to kobject
581  * @attr: pointer to attribute struct
582  * @buf: buffer
583  */
584 static ssize_t instance_attr_show(struct kobject *kobj,
585                                   struct attribute *attr,
586                                   char *buf)
587 {
588         struct most_inst_attribute *instance_attr;
589         struct most_inst_obj *instance_obj;
590
591         instance_attr = to_instance_attr(attr);
592         instance_obj = to_inst_obj(kobj);
593
594         if (!instance_attr->show)
595                 return -EIO;
596
597         return instance_attr->show(instance_obj, instance_attr, buf);
598 }
599
600 /**
601  * instance_attr_store - store function for an instance object
602  * @kobj: pointer to kobject
603  * @attr: pointer to attribute struct
604  * @buf: buffer
605  * @len: length of buffer
606  */
607 static ssize_t instance_attr_store(struct kobject *kobj,
608                                    struct attribute *attr,
609                                    const char *buf,
610                                    size_t len)
611 {
612         struct most_inst_attribute *instance_attr;
613         struct most_inst_obj *instance_obj;
614
615         instance_attr = to_instance_attr(attr);
616         instance_obj = to_inst_obj(kobj);
617
618         if (!instance_attr->store)
619                 return -EIO;
620
621         return instance_attr->store(instance_obj, instance_attr, buf, len);
622 }
623
624 static const struct sysfs_ops most_inst_sysfs_ops = {
625         .show = instance_attr_show,
626         .store = instance_attr_store,
627 };
628
629 /**
630  * most_inst_release - release function for instance object
631  * @kobj: pointer to instance's kobject
632  *
633  * This frees the allocated memory for the instance object
634  */
635 static void most_inst_release(struct kobject *kobj)
636 {
637         struct most_inst_obj *inst = to_inst_obj(kobj);
638
639         kfree(inst);
640 }
641
642 static ssize_t show_description(struct most_inst_obj *instance_obj,
643                                 struct most_inst_attribute *attr,
644                                 char *buf)
645 {
646         return snprintf(buf, PAGE_SIZE, "%s\n",
647                         instance_obj->iface->description);
648 }
649
650 static ssize_t show_interface(struct most_inst_obj *instance_obj,
651                               struct most_inst_attribute *attr,
652                               char *buf)
653 {
654         switch (instance_obj->iface->interface) {
655         case ITYPE_LOOPBACK:
656                 return snprintf(buf, PAGE_SIZE, "loopback\n");
657         case ITYPE_I2C:
658                 return snprintf(buf, PAGE_SIZE, "i2c\n");
659         case ITYPE_I2S:
660                 return snprintf(buf, PAGE_SIZE, "i2s\n");
661         case ITYPE_TSI:
662                 return snprintf(buf, PAGE_SIZE, "tsi\n");
663         case ITYPE_HBI:
664                 return snprintf(buf, PAGE_SIZE, "hbi\n");
665         case ITYPE_MEDIALB_DIM:
666                 return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
667         case ITYPE_MEDIALB_DIM2:
668                 return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
669         case ITYPE_USB:
670                 return snprintf(buf, PAGE_SIZE, "usb\n");
671         case ITYPE_PCIE:
672                 return snprintf(buf, PAGE_SIZE, "pcie\n");
673         }
674         return snprintf(buf, PAGE_SIZE, "unknown\n");
675 }
676
677 #define create_inst_attribute(value) \
678         static MOST_INST_ATTR(value, S_IRUGO, show_##value, NULL)
679
680 create_inst_attribute(description);
681 create_inst_attribute(interface);
682
683 static struct attribute *most_inst_def_attrs[] = {
684         &most_inst_attr_description.attr,
685         &most_inst_attr_interface.attr,
686         NULL,
687 };
688
689 static struct kobj_type most_inst_ktype = {
690         .sysfs_ops = &most_inst_sysfs_ops,
691         .release = most_inst_release,
692         .default_attrs = most_inst_def_attrs,
693 };
694
695 static struct kset *most_inst_kset;
696
697 /**
698  * create_most_inst_obj - creates an instance object
699  * @name: name of the object to be created
700  *
701  * This allocates memory for an instance structure, assigns the proper kset
702  * and registers it with sysfs.
703  *
704  * Returns a pointer to the instance object or NULL when something went wrong.
705  */
706 static struct most_inst_obj *create_most_inst_obj(const char *name)
707 {
708         struct most_inst_obj *inst;
709         int retval;
710
711         inst = kzalloc(sizeof(*inst), GFP_KERNEL);
712         if (!inst)
713                 return NULL;
714         inst->kobj.kset = most_inst_kset;
715         retval = kobject_init_and_add(&inst->kobj, &most_inst_ktype, NULL,
716                                       "%s", name);
717         if (retval) {
718                 kobject_put(&inst->kobj);
719                 return NULL;
720         }
721         kobject_uevent(&inst->kobj, KOBJ_ADD);
722         return inst;
723 }
724
725 /**
726  * destroy_most_inst_obj - MOST instance release function
727  * @inst: pointer to the instance object
728  *
729  * This decrements the reference counter of the instance object.
730  * If the reference count turns zero, its release function is called
731  */
732 static void destroy_most_inst_obj(struct most_inst_obj *inst)
733 {
734         struct most_c_obj *c, *tmp;
735
736         list_for_each_entry_safe(c, tmp, &inst->channel_list, list) {
737                 flush_trash_fifo(c);
738                 flush_channel_fifos(c);
739                 kobject_put(&c->kobj);
740         }
741         kobject_put(&inst->kobj);
742 }
743
744 /*                   ___     ___
745  *                   ___A I M___
746  */
747 struct most_aim_obj {
748         struct kobject kobj;
749         struct list_head list;
750         struct most_aim *driver;
751         char add_link[STRING_SIZE];
752         char remove_link[STRING_SIZE];
753 };
754
755 #define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj)
756
757 static struct list_head aim_list;
758
759 /**
760  * struct most_aim_attribute - to access the attributes of AIM object
761  * @attr: attributes of an AIM
762  * @show: pointer to the show function
763  * @store: pointer to the store function
764  */
765 struct most_aim_attribute {
766         struct attribute attr;
767         ssize_t (*show)(struct most_aim_obj *d,
768                         struct most_aim_attribute *attr,
769                         char *buf);
770         ssize_t (*store)(struct most_aim_obj *d,
771                          struct most_aim_attribute *attr,
772                          const char *buf,
773                          size_t count);
774 };
775
776 #define to_aim_attr(a) container_of(a, struct most_aim_attribute, attr)
777
778 /**
779  * aim_attr_show - show function of an AIM object
780  * @kobj: pointer to kobject
781  * @attr: pointer to attribute struct
782  * @buf: buffer
783  */
784 static ssize_t aim_attr_show(struct kobject *kobj,
785                              struct attribute *attr,
786                              char *buf)
787 {
788         struct most_aim_attribute *aim_attr;
789         struct most_aim_obj *aim_obj;
790
791         aim_attr = to_aim_attr(attr);
792         aim_obj = to_aim_obj(kobj);
793
794         if (!aim_attr->show)
795                 return -EIO;
796
797         return aim_attr->show(aim_obj, aim_attr, buf);
798 }
799
800 /**
801  * aim_attr_store - store function of an AIM object
802  * @kobj: pointer to kobject
803  * @attr: pointer to attribute struct
804  * @buf: buffer
805  * @len: length of buffer
806  */
807 static ssize_t aim_attr_store(struct kobject *kobj,
808                               struct attribute *attr,
809                               const char *buf,
810                               size_t len)
811 {
812         struct most_aim_attribute *aim_attr;
813         struct most_aim_obj *aim_obj;
814
815         aim_attr = to_aim_attr(attr);
816         aim_obj = to_aim_obj(kobj);
817
818         if (!aim_attr->store)
819                 return -EIO;
820         return aim_attr->store(aim_obj, aim_attr, buf, len);
821 }
822
823 static const struct sysfs_ops most_aim_sysfs_ops = {
824         .show = aim_attr_show,
825         .store = aim_attr_store,
826 };
827
828 /**
829  * most_aim_release - AIM release function
830  * @kobj: pointer to AIM's kobject
831  */
832 static void most_aim_release(struct kobject *kobj)
833 {
834         struct most_aim_obj *aim_obj = to_aim_obj(kobj);
835
836         kfree(aim_obj);
837 }
838
839 static ssize_t show_add_link(struct most_aim_obj *aim_obj,
840                              struct most_aim_attribute *attr,
841                              char *buf)
842 {
843         return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->add_link);
844 }
845
846 /**
847  * split_string - parses and modifies the string in the buffer buf and
848  * splits it into two mandatory and one optional substring.
849  *
850  * @buf: complete string from attribute 'add_channel'
851  * @a: address of pointer to 1st substring (=instance name)
852  * @b: address of pointer to 2nd substring (=channel name)
853  * @c: optional address of pointer to 3rd substring (=user defined name)
854  *
855  * Examples:
856  *
857  * Input: "mdev0:ch0@ep_81:my_channel\n" or
858  *        "mdev0:ch0@ep_81:my_channel"
859  *
860  * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> "my_channel"
861  *
862  * Input: "mdev0:ch0@ep_81\n"
863  * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> ""
864  *
865  * Input: "mdev0:ch0@ep_81"
866  * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c == NULL
867  */
868 static int split_string(char *buf, char **a, char **b, char **c)
869 {
870         *a = strsep(&buf, ":");
871         if (!*a)
872                 return -EIO;
873
874         *b = strsep(&buf, ":\n");
875         if (!*b)
876                 return -EIO;
877
878         if (c)
879                 *c = strsep(&buf, ":\n");
880
881         return 0;
882 }
883
884 /**
885  * get_channel_by_name - get pointer to channel object
886  * @mdev: name of the device instance
887  * @mdev_ch: name of the respective channel
888  *
889  * This retrieves the pointer to a channel object.
890  */
891 static struct
892 most_c_obj *get_channel_by_name(char *mdev, char *mdev_ch)
893 {
894         struct most_c_obj *c, *tmp;
895         struct most_inst_obj *i, *i_tmp;
896         int found = 0;
897
898         list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
899                 if (!strcmp(kobject_name(&i->kobj), mdev)) {
900                         found++;
901                         break;
902                 }
903         }
904         if (unlikely(!found))
905                 return ERR_PTR(-EIO);
906
907         list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
908                 if (!strcmp(kobject_name(&c->kobj), mdev_ch)) {
909                         found++;
910                         break;
911                 }
912         }
913         if (unlikely(found < 2))
914                 return ERR_PTR(-EIO);
915         return c;
916 }
917
918 /**
919  * store_add_link - store() function for add_link attribute
920  * @aim_obj: pointer to AIM object
921  * @attr: its attributes
922  * @buf: buffer
923  * @len: buffer length
924  *
925  * This parses the string given by buf and splits it into
926  * three substrings. Note: the third substring is optional. If a cdev
927  * AIM is loaded, the optional 3rd substring makes up the name of the
928  * device node in the /dev directory. If omitted, the device node will
929  * inherit the channel's name within sysfs.
930  *
931  * Searches for a pair of device and channel and probes the AIM
932  *
933  * Example:
934  * (1) echo -n -e "mdev0:ch0@ep_81:my_rxchannel\n" >add_link
935  * (2) echo -n -e "mdev0:ch0@ep_81\n" >add_link
936  *
937  * (1) would create the device node /dev/my_rxchannel
938  * (2) would create the device node /dev/mdev0-ch0@ep_81
939  */
940 static ssize_t store_add_link(struct most_aim_obj *aim_obj,
941                               struct most_aim_attribute *attr,
942                               const char *buf,
943                               size_t len)
944 {
945         struct most_c_obj *c;
946         struct most_aim **aim_ptr;
947         char buffer[STRING_SIZE];
948         char *mdev;
949         char *mdev_ch;
950         char *mdev_devnod;
951         char devnod_buf[STRING_SIZE];
952         int ret;
953         size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
954
955         strlcpy(buffer, buf, max_len);
956         strlcpy(aim_obj->add_link, buf, max_len);
957
958         ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod);
959         if (ret)
960                 return ret;
961
962         if (!mdev_devnod || *mdev_devnod == 0) {
963                 snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev,
964                          mdev_ch);
965                 mdev_devnod = devnod_buf;
966         }
967
968         c = get_channel_by_name(mdev, mdev_ch);
969         if (IS_ERR(c))
970                 return -ENODEV;
971
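        /* a channel can be linked to at most two AIMs; grab a free slot */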
972         if (!c->aim0.ptr)
973                 aim_ptr = &c->aim0.ptr;
974         else if (!c->aim1.ptr)
975                 aim_ptr = &c->aim1.ptr;
976         else
977                 return -ENOSPC;
978
979         *aim_ptr = aim_obj->driver;
980         ret = aim_obj->driver->probe_channel(c->iface, c->channel_id,
981                                              &c->cfg, &c->kobj, mdev_devnod);
982         if (ret) {
983                 *aim_ptr = NULL;
984                 return ret;
985         }
986
987         return len;
988 }
989
990 static struct most_aim_attribute most_aim_attr_add_link =
991         __ATTR(add_link, S_IRUGO | S_IWUSR, show_add_link, store_add_link);
992
993 static ssize_t show_remove_link(struct most_aim_obj *aim_obj,
994                                 struct most_aim_attribute *attr,
995                                 char *buf)
996 {
997         return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->remove_link);
998 }
999
1000 /**
1001  * store_remove_link - store function for remove_link attribute
1002  * @aim_obj: pointer to AIM object
1003  * @attr: its attributes
1004  * @buf: buffer
1005  * @len: buffer length
1006  *
1007  * Example:
1008  * echo -n -e "mdev0:ch0@ep_81\n" >remove_link
1009  */
1010 static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
1011                                  struct most_aim_attribute *attr,
1012                                  const char *buf,
1013                                  size_t len)
1014 {
1015         struct most_c_obj *c;
1016         char buffer[STRING_SIZE];
1017         char *mdev;
1018         char *mdev_ch;
1019         int ret;
1020         size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
1021
1022         strlcpy(buffer, buf, max_len);
1023         strlcpy(aim_obj->remove_link, buf, max_len);
1024         ret = split_string(buffer, &mdev, &mdev_ch, NULL);
1025         if (ret)
1026                 return ret;
1027
1028         c = get_channel_by_name(mdev, mdev_ch);
1029         if (IS_ERR(c))
1030                 return -ENODEV;
1031
1032         if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
1033                 return -EIO;
1034         if (c->aim0.ptr == aim_obj->driver)
1035                 c->aim0.ptr = NULL;
1036         if (c->aim1.ptr == aim_obj->driver)
1037                 c->aim1.ptr = NULL;
1038         return len;
1039 }
1040
1041 static struct most_aim_attribute most_aim_attr_remove_link =
1042         __ATTR(remove_link, S_IRUGO | S_IWUSR, show_remove_link,
1043                store_remove_link);
1044
1045 static struct attribute *most_aim_def_attrs[] = {
1046         &most_aim_attr_add_link.attr,
1047         &most_aim_attr_remove_link.attr,
1048         NULL,
1049 };
1050
1051 static struct kobj_type most_aim_ktype = {
1052         .sysfs_ops = &most_aim_sysfs_ops,
1053         .release = most_aim_release,
1054         .default_attrs = most_aim_def_attrs,
1055 };
1056
1057 static struct kset *most_aim_kset;
1058
1059 /**
1060  * create_most_aim_obj - creates an AIM object
1061  * @name: name of the AIM
1062  *
1063  * This creates an AIM object, assigns the proper kset and registers
1064  * it with sysfs.
1065  * Returns a pointer to the object or NULL if something went wrong.
1066  */
1067 static struct most_aim_obj *create_most_aim_obj(const char *name)
1068 {
1069         struct most_aim_obj *most_aim;
1070         int retval;
1071
1072         most_aim = kzalloc(sizeof(*most_aim), GFP_KERNEL);
1073         if (!most_aim)
1074                 return NULL;
1075         most_aim->kobj.kset = most_aim_kset;
1076         retval = kobject_init_and_add(&most_aim->kobj, &most_aim_ktype,
1077                                       NULL, "%s", name);
1078         if (retval) {
1079                 kobject_put(&most_aim->kobj);
1080                 return NULL;
1081         }
1082         kobject_uevent(&most_aim->kobj, KOBJ_ADD);
1083         return most_aim;
1084 }
1085
1086 /**
1087  * destroy_most_aim_obj - AIM release function
1088  * @p: pointer to AIM object
1089  *
1090  * This decrements the reference counter of the AIM object. If the
1091  * reference count turns zero, its release function will be called.
1092  */
1093 static void destroy_most_aim_obj(struct most_aim_obj *p)
1094 {
1095         kobject_put(&p->kobj);
1096 }
1097
1098 /*                   ___       ___
1099  *                   ___C O R E___
1100  */
1101
1102 /**
1103  * Instantiation of the MOST bus
1104  */
1105 static struct bus_type most_bus = {
1106         .name = "most",
1107 };
1108
1109 /**
1110  * Instantiation of the core driver
1111  */
1112 static struct device_driver mostcore = {
1113         .name = "mostcore",
1114         .bus = &most_bus,
1115 };
1116
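/*
 * trash_mbo - park an MBO on the channel's trash fifo
 *
 * Used for buffers of a poisoned channel or buffers that complete with
 * MBO_E_CLOSE; they stay on trash_fifo until flush_trash_fifo()
 * releases them.
 */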
1117 static inline void trash_mbo(struct mbo *mbo)
1118 {
1119         unsigned long flags;
1120         struct most_c_obj *c = mbo->context;
1121
1122         spin_lock_irqsave(&c->fifo_lock, flags);
1123         list_add(&mbo->list, &c->trash_fifo);
1124         spin_unlock_irqrestore(&c->fifo_lock, flags);
1125 }
1126
1127 static struct mbo *get_hdm_mbo(struct most_c_obj *c)
1128 {
1129         unsigned long flags;
1130         struct mbo *mbo;
1131
1132         spin_lock_irqsave(&c->fifo_lock, flags);
1133         if (c->enqueue_halt || list_empty(&c->halt_fifo))
1134                 mbo = NULL;
1135         else
1136                 mbo = list_pop_mbo(&c->halt_fifo);
1137         spin_unlock_irqrestore(&c->fifo_lock, flags);
1138         return mbo;
1139 }
1140
1141 static void nq_hdm_mbo(struct mbo *mbo)
1142 {
1143         unsigned long flags;
1144         struct most_c_obj *c = mbo->context;
1145
1146         spin_lock_irqsave(&c->fifo_lock, flags);
1147         list_add_tail(&mbo->list, &c->halt_fifo);
1148         spin_unlock_irqrestore(&c->fifo_lock, flags);
1149         wake_up_interruptible(&c->hdm_fifo_wq);
1150 }
1151
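/*
 * hdm_enqueue_thread - per-channel kthread that feeds the HDM
 *
 * Sleeps until an MBO appears in the halt fifo or the thread is asked to
 * stop. For Rx channels the buffer length is set to the configured
 * buffer size before the MBO is handed to the interface's enqueue()
 * routine. If enqueueing fails, the MBO is requeued and the thread exits.
 */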
1152 static int hdm_enqueue_thread(void *data)
1153 {
1154         struct most_c_obj *c = data;
1155         struct mbo *mbo;
1156         typeof(c->iface->enqueue) enqueue = c->iface->enqueue;
1157
1158         while (likely(!kthread_should_stop())) {
1159                 wait_event_interruptible(c->hdm_fifo_wq,
1160                                          (mbo = get_hdm_mbo(c)) ||
1161                                          kthread_should_stop());
1162
1163                 if (unlikely(!mbo))
1164                         continue;
1165
1166                 if (c->cfg.direction == MOST_CH_RX)
1167                         mbo->buffer_length = c->cfg.buffer_size;
1168
1169                 if (unlikely(enqueue(mbo->ifp, mbo->hdm_channel_id, mbo))) {
1170                         pr_err("hdm enqueue failed\n");
1171                         nq_hdm_mbo(mbo);
1172                         c->hdm_enqueue_task = NULL;
1173                         return 0;
1174                 }
1175         }
1176
1177         return 0;
1178 }
1179
1180 static int run_enqueue_thread(struct most_c_obj *c, int channel_id)
1181 {
1182         struct task_struct *task =
1183                 kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
1184                             channel_id);
1185
1186         if (IS_ERR(task))
1187                 return PTR_ERR(task);
1188
1189         c->hdm_enqueue_task = task;
1190         return 0;
1191 }
1192
1193 /**
1194  * arm_mbo - recycle MBO for further usage
1195  * @mbo: buffer object
1196  *
1197  * This puts an MBO back into the list to have it ready for upcoming
1198  * tx transactions.
1199  *
1200  * In case the MBO belongs to a channel that recently has been
1201  * poisoned, the MBO is scheduled to be trashed.
1202  * Calls the completion handler of an attached AIM.
1203  */
1204 static void arm_mbo(struct mbo *mbo)
1205 {
1206         unsigned long flags;
1207         struct most_c_obj *c;
1208
1209         BUG_ON((!mbo) || (!mbo->context));
1210         c = mbo->context;
1211
1212         if (c->is_poisoned) {
1213                 trash_mbo(mbo);
1214                 return;
1215         }
1216
1217         spin_lock_irqsave(&c->fifo_lock, flags);
1218         ++*mbo->num_buffers_ptr;
1219         list_add_tail(&mbo->list, &c->fifo);
1220         spin_unlock_irqrestore(&c->fifo_lock, flags);
1221
1222         if (c->aim0.refs && c->aim0.ptr->tx_completion)
1223                 c->aim0.ptr->tx_completion(c->iface, c->channel_id);
1224
1225         if (c->aim1.refs && c->aim1.ptr->tx_completion)
1226                 c->aim1.ptr->tx_completion(c->iface, c->channel_id);
1227 }
1228
1229 /**
1230  * arm_mbo_chain - helper function that arms an MBO chain for the HDM
1231  * @c: pointer to interface channel
1232  * @dir: direction of the channel
1233  * @compl: pointer to completion function
1234  *
1235  * This allocates buffer objects along with their DMA coherent
1236  * buffers and puts them in the fifo.
1237  * Buffers of Rx channels are put in the kthread fifo, hence immediately
1238  * submitted to the HDM.
1239  *
1240  * Returns the number of allocated and enqueued MBOs.
1241  */
1242 static int arm_mbo_chain(struct most_c_obj *c, int dir,
1243                          void (*compl)(struct mbo *))
1244 {
1245         unsigned int i;
1246         int retval;
1247         struct mbo *mbo;
1248         u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
1249
1250         atomic_set(&c->mbo_nq_level, 0);
1251
1252         for (i = 0; i < c->cfg.num_buffers; i++) {
1253                 mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
1254                 if (!mbo) {
1255                         pr_info("WARN: Allocation of MBO failed.\n");
1256                         retval = i;
1257                         goto _exit;
1258                 }
1259                 mbo->context = c;
1260                 mbo->ifp = c->iface;
1261                 mbo->hdm_channel_id = c->channel_id;
1262                 mbo->virt_address = dma_alloc_coherent(NULL,
1263                                                        coherent_buf_size,
1264                                                        &mbo->bus_address,
1265                                                        GFP_KERNEL);
1266                 if (!mbo->virt_address) {
1267                         pr_info("WARN: No DMA coherent buffer.\n");
1268                         retval = i;
1269                         goto _error1;
1270                 }
1271                 mbo->complete = compl;
1272                 mbo->num_buffers_ptr = &dummy_num_buffers;
1273                 if (dir == MOST_CH_RX) {
1274                         nq_hdm_mbo(mbo);
1275                         atomic_inc(&c->mbo_nq_level);
1276                 } else {
1277                         arm_mbo(mbo);
1278                 }
1279         }
1280         return i;
1281
1282 _error1:
1283         kfree(mbo);
1284 _exit:
1285         return retval;
1286 }
1287
1288 /**
1289  * most_submit_mbo - submits an MBO to fifo
1290  * @mbo: pointer to the MBO
1291  *
1292  */
1293 int most_submit_mbo(struct mbo *mbo)
1294 {
1295         if (unlikely((!mbo) || (!mbo->context))) {
1296                 pr_err("Bad MBO or missing channel reference\n");
1297                 return -EINVAL;
1298         }
1299
1300         nq_hdm_mbo(mbo);
1301         return 0;
1302 }
1303 EXPORT_SYMBOL_GPL(most_submit_mbo);
1304
1305 /**
1306  * most_write_completion - write completion handler
1307  * @mbo: pointer to MBO
1308  *
1309  * This recycles the MBO for further usage. In case the channel has been
1310  * poisoned, the MBO is scheduled to be trashed.
1311  */
1312 static void most_write_completion(struct mbo *mbo)
1313 {
1314         struct most_c_obj *c;
1315
1316         BUG_ON((!mbo) || (!mbo->context));
1317
1318         c = mbo->context;
1319         if (mbo->status == MBO_E_INVAL)
1320                 pr_info("WARN: Tx MBO status: invalid\n");
1321         if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
1322                 trash_mbo(mbo);
1323         else
1324                 arm_mbo(mbo);
1325 }
1326
1327 /**
1328  * get_channel_by_iface - get pointer to channel object
1329  * @iface: pointer to interface instance
1330  * @id: channel ID
1331  *
1332  * This retrieves a pointer to a channel of the given interface and channel ID.
1333  */
1334 static struct
1335 most_c_obj *get_channel_by_iface(struct most_interface *iface, int id)
1336 {
1337         struct most_inst_obj *i;
1338
1339         if (unlikely(!iface)) {
1340                 pr_err("Bad interface\n");
1341                 return NULL;
1342         }
1343         if (unlikely((id < 0) || (id >= iface->num_channels))) {
1344                 pr_err("Channel index (%d) out of range\n", id);
1345                 return NULL;
1346         }
1347         i = iface->priv;
1348         if (unlikely(!i)) {
1349                 pr_err("interface is not registered\n");
1350                 return NULL;
1351         }
1352         return i->channel[id];
1353 }
1354
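/**
 * channel_has_mbo - check if the channel fifo holds a buffer for an AIM
 * @iface: pointer to interface instance
 * @id: channel ID
 * @aim: pointer to the AIM asking for a buffer
 *
 * Returns -EINVAL if the channel cannot be found. Otherwise returns
 * non-zero when the fifo is not empty and, with two AIMs linked, the
 * calling AIM has not yet used up its share of buffers.
 */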
1355 int channel_has_mbo(struct most_interface *iface, int id, struct most_aim *aim)
1356 {
1357         struct most_c_obj *c = get_channel_by_iface(iface, id);
1358         unsigned long flags;
1359         int empty;
1360
1361         if (unlikely(!c))
1362                 return -EINVAL;
1363
1364         if (c->aim0.refs && c->aim1.refs &&
1365             ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
1366              (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
1367                 return false;
1368
1369         spin_lock_irqsave(&c->fifo_lock, flags);
1370         empty = list_empty(&c->fifo);
1371         spin_unlock_irqrestore(&c->fifo_lock, flags);
1372         return !empty;
1373 }
1374 EXPORT_SYMBOL_GPL(channel_has_mbo);
1375
1376 /**
1377  * most_get_mbo - get pointer to an MBO of pool
1378  * @iface: pointer to interface instance
1379  * @id: channel ID
1380  *
1381  * This attempts to get a free buffer out of the channel fifo.
1382  * Returns a pointer to MBO on success or NULL otherwise.
1383  */
1384 struct mbo *most_get_mbo(struct most_interface *iface, int id,
1385                          struct most_aim *aim)
1386 {
1387         struct mbo *mbo;
1388         struct most_c_obj *c;
1389         unsigned long flags;
1390         int *num_buffers_ptr;
1391
1392         c = get_channel_by_iface(iface, id);
1393         if (unlikely(!c))
1394                 return NULL;
1395
1396         if (c->aim0.refs && c->aim1.refs &&
1397             ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
1398              (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
1399                 return NULL;
1400
1401         if (aim == c->aim0.ptr)
1402                 num_buffers_ptr = &c->aim0.num_buffers;
1403         else if (aim == c->aim1.ptr)
1404                 num_buffers_ptr = &c->aim1.num_buffers;
1405         else
1406                 num_buffers_ptr = &dummy_num_buffers;
1407
1408         spin_lock_irqsave(&c->fifo_lock, flags);
1409         if (list_empty(&c->fifo)) {
1410                 spin_unlock_irqrestore(&c->fifo_lock, flags);
1411                 return NULL;
1412         }
1413         mbo = list_pop_mbo(&c->fifo);
1414         --*num_buffers_ptr;
1415         spin_unlock_irqrestore(&c->fifo_lock, flags);
1416
1417         mbo->num_buffers_ptr = num_buffers_ptr;
1418         mbo->buffer_length = c->cfg.buffer_size;
1419         return mbo;
1420 }
1421 EXPORT_SYMBOL_GPL(most_get_mbo);
1422
1423 /**
1424  * most_put_mbo - return buffer to pool
1425  * @mbo: buffer object
1426  */
1427 void most_put_mbo(struct mbo *mbo)
1428 {
1429         struct most_c_obj *c = mbo->context;
1430
1431         if (c->cfg.direction == MOST_CH_TX) {
1432                 arm_mbo(mbo);
1433                 return;
1434         }
1435         nq_hdm_mbo(mbo);
1436         atomic_inc(&c->mbo_nq_level);
1437 }
1438 EXPORT_SYMBOL_GPL(most_put_mbo);
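
/*
 * Illustrative sketch, not code from an in-tree AIM: a Tx user of the
 * buffer pool exported above might look roughly like this (my_iface,
 * my_aim, my_data and len are placeholders):
 *
 *      struct mbo *mbo = most_get_mbo(my_iface, channel_id, &my_aim);
 *
 *      if (mbo) {
 *              memcpy(mbo->virt_address, my_data, len);
 *              mbo->buffer_length = len;
 *              most_submit_mbo(mbo);
 *      }
 *
 * most_submit_mbo() only queues the buffer for the per-channel enqueue
 * thread, which hands it to the HDM. On the Rx path the core calls the
 * AIM's rx_completion() with a filled MBO, and the AIM returns the
 * buffer with most_put_mbo() once it is done with the data.
 */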
1439
1440 /**
1441  * most_read_completion - read completion handler
1442  * @mbo: pointer to MBO
1443  *
1444  * This function is called by the HDM when data has been received from the
1445  * hardware and copied to the buffer of the MBO.
1446  *
1447  * In case the channel has been poisoned it puts the buffer in the trash queue.
1448  * Otherwise, it passes the buffer to an AIM for further processing.
1449  */
1450 static void most_read_completion(struct mbo *mbo)
1451 {
1452         struct most_c_obj *c = mbo->context;
1453
1454         if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
1455                 trash_mbo(mbo);
1456                 return;
1457         }
1458
1459         if (mbo->status == MBO_E_INVAL) {
1460                 nq_hdm_mbo(mbo);
1461                 atomic_inc(&c->mbo_nq_level);
1462                 return;
1463         }
1464
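        /*
         * mbo_nq_level counts the Rx buffers currently queued at the HDM;
         * when it drops to zero the hardware has nothing left to receive
         * into and the channel is flagged as starving.
         */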
1465         if (atomic_sub_and_test(1, &c->mbo_nq_level)) {
1466                 pr_info("WARN: rx device out of buffers\n");
1467                 c->is_starving = 1;
1468         }
1469
1470         if (c->aim0.refs && c->aim0.ptr->rx_completion &&
1471             c->aim0.ptr->rx_completion(mbo) == 0)
1472                 return;
1473
1474         if (c->aim1.refs && c->aim1.ptr->rx_completion &&
1475             c->aim1.ptr->rx_completion(mbo) == 0)
1476                 return;
1477
1478         most_put_mbo(mbo);
1479 }
1480
1481 /**
1482  * most_start_channel - prepares a channel for communication
1483  * @iface: pointer to interface instance
1484  * @id: channel ID
1485  *
1486  * This prepares the channel for use and cross-checks whether the
1487  * channel has been properly configured.
1488  *
1489  * Returns 0 on success or error code otherwise.
1490  */
1491 int most_start_channel(struct most_interface *iface, int id,
1492                        struct most_aim *aim)
1493 {
1494         int num_buffer;
1495         int ret;
1496         struct most_c_obj *c = get_channel_by_iface(iface, id);
1497
1498         if (unlikely(!c))
1499                 return -EINVAL;
1500
1501         mutex_lock(&c->start_mutex);
1502         if (c->aim0.refs + c->aim1.refs > 0)
1503                 goto out; /* already started by other aim */
1504
1505         if (!try_module_get(iface->mod)) {
1506                 pr_info("failed to acquire HDM lock\n");
1507                 mutex_unlock(&c->start_mutex);
1508                 return -ENOLCK;
1509         }
1510
1511         c->cfg.extra_len = 0;
1512         if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
1513                 pr_info("channel configuration failed. Go check settings...\n");
1514                 ret = -EINVAL;
1515                 goto error;
1516         }
1517
1518         init_waitqueue_head(&c->hdm_fifo_wq);
1519
1520         if (c->cfg.direction == MOST_CH_RX)
1521                 num_buffer = arm_mbo_chain(c, c->cfg.direction,
1522                                            most_read_completion);
1523         else
1524                 num_buffer = arm_mbo_chain(c, c->cfg.direction,
1525                                            most_write_completion);
1526         if (unlikely(!num_buffer)) {
1527                 pr_info("failed to allocate memory\n");
1528                 ret = -ENOMEM;
1529                 goto error;
1530         }
1531
1532         ret = run_enqueue_thread(c, id);
1533         if (ret)
1534                 goto error;
1535
1536         c->is_starving = 0;
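        /* split the configured buffer budget between the two possible AIMs */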
1537         c->aim0.num_buffers = c->cfg.num_buffers / 2;
1538         c->aim1.num_buffers = c->cfg.num_buffers - c->aim0.num_buffers;
1539         atomic_set(&c->mbo_ref, num_buffer);
1540
1541 out:
1542         if (aim == c->aim0.ptr)
1543                 c->aim0.refs++;
1544         if (aim == c->aim1.ptr)
1545                 c->aim1.refs++;
1546         mutex_unlock(&c->start_mutex);
1547         return 0;
1548
1549 error:
1550         module_put(iface->mod);
1551         mutex_unlock(&c->start_mutex);
1552         return ret;
1553 }
1554 EXPORT_SYMBOL_GPL(most_start_channel);
1555
1556 /**
1557  * most_stop_channel - stops a running channel
1558  * @iface: pointer to interface instance
1559  * @id: channel ID
1560  */
1561 int most_stop_channel(struct most_interface *iface, int id,
1562                       struct most_aim *aim)
1563 {
1564         struct most_c_obj *c;
1565
1566         if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
1567                 pr_err("Bad interface or index out of range\n");
1568                 return -EINVAL;
1569         }
1570         c = get_channel_by_iface(iface, id);
1571         if (unlikely(!c))
1572                 return -EINVAL;
1573
1574         mutex_lock(&c->start_mutex);
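        /*
         * If another AIM still uses the channel, skip the teardown and
         * only drop this AIM's reference below.
         */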
1575         if (c->aim0.refs + c->aim1.refs >= 2)
1576                 goto out;
1577
1578         if (c->hdm_enqueue_task)
1579                 kthread_stop(c->hdm_enqueue_task);
1580         c->hdm_enqueue_task = NULL;
1581
1582         if (iface->mod)
1583                 module_put(iface->mod);
1584
1585         c->is_poisoned = true;
1586         if (c->iface->poison_channel(c->iface, c->channel_id)) {
1587                 pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
1588                        c->iface->description);
1589                 mutex_unlock(&c->start_mutex);
1590                 return -EAGAIN;
1591         }
1592         flush_trash_fifo(c);
1593         flush_channel_fifos(c);
1594
1595 #ifdef CMPL_INTERRUPTIBLE
1596         if (wait_for_completion_interruptible(&c->cleanup)) {
1597                 pr_info("Interrupted while cleaning up ch %d\n", c->channel_id);
1598                 mutex_unlock(&c->start_mutex);
1599                 return -EINTR;
1600         }
1601 #else
1602         wait_for_completion(&c->cleanup);
1603 #endif
1604         c->is_poisoned = false;
1605
1606 out:
1607         if (aim == c->aim0.ptr)
1608                 c->aim0.refs--;
1609         if (aim == c->aim1.ptr)
1610                 c->aim1.refs--;
1611         mutex_unlock(&c->start_mutex);
1612         return 0;
1613 }
1614 EXPORT_SYMBOL_GPL(most_stop_channel);
1615
1616 /**
1617  * most_register_aim - registers an AIM (driver) with the core
1618  * @aim: instance of AIM to be registered
1619  */
1620 int most_register_aim(struct most_aim *aim)
1621 {
1622         struct most_aim_obj *aim_obj;
1623
1624         if (!aim) {
1625                 pr_err("Bad driver\n");
1626                 return -EINVAL;
1627         }
1628         aim_obj = create_most_aim_obj(aim->name);
1629         if (!aim_obj) {
1630                 pr_info("failed to alloc driver object\n");
1631                 return -ENOMEM;
1632         }
1633         aim_obj->driver = aim;
1634         aim->context = aim_obj;
1635         pr_info("registered new application interfacing module %s\n",
1636                 aim->name);
1637         list_add_tail(&aim_obj->list, &aim_list);
1638         return 0;
1639 }
1640 EXPORT_SYMBOL_GPL(most_register_aim);
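
/*
 * Example (illustrative sketch, not part of this driver): a minimal AIM
 * fills in a struct most_aim and hands it to most_register_aim() from
 * its module init. sample_aim and sample_disconnect_channel() are
 * hypothetical; further callbacks (e.g. completion handlers) are
 * declared in mostcore.h and omitted here.
 *
 *	static int sample_disconnect_channel(struct most_interface *iface,
 *					     int channel_id)
 *	{
 *		...release per-channel state here...
 *		return 0;
 *	}
 *
 *	static struct most_aim sample_aim = {
 *		.name = "sample",
 *		.disconnect_channel = sample_disconnect_channel,
 *	};
 *
 *	static int __init sample_init(void)
 *	{
 *		return most_register_aim(&sample_aim);
 *	}
 */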
1641
1642 /**
1643  * most_deregister_aim - deregisters an AIM (driver) from the core
1644  * @aim: AIM to be removed
1645  */
1646 int most_deregister_aim(struct most_aim *aim)
1647 {
1648         struct most_aim_obj *aim_obj;
1649         struct most_c_obj *c, *tmp;
1650         struct most_inst_obj *i, *i_tmp;
1651
1652         if (!aim) {
1653                 pr_err("Bad driver\n");
1654                 return -EINVAL;
1655         }
1656
1657         aim_obj = aim->context;
1658         if (!aim_obj) {
1659                 pr_info("driver not registered.\n");
1660                 return -EINVAL;
1661         }
1662         list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
1663                 list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
1664                         if (c->aim0.ptr == aim || c->aim1.ptr == aim)
1665                                 aim->disconnect_channel(
1666                                         c->iface, c->channel_id);
1667                         if (c->aim0.ptr == aim)
1668                                 c->aim0.ptr = NULL;
1669                         if (c->aim1.ptr == aim)
1670                                 c->aim1.ptr = NULL;
1671                 }
1672         }
1673         list_del(&aim_obj->list);
1674         destroy_most_aim_obj(aim_obj);
1675         pr_info("deregistering application interfacing module %s\n", aim->name);
1676         return 0;
1677 }
1678 EXPORT_SYMBOL_GPL(most_deregister_aim);
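
/*
 * Example (illustrative sketch, not part of this driver): the
 * counterpart to the registration above is a single call from the
 * AIM's module exit; the core detaches the AIM from any channels it
 * is still linked to before destroying the AIM object.
 *
 *	static void __exit sample_exit(void)
 *	{
 *		most_deregister_aim(&sample_aim);
 *	}
 */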
1679
1680 /**
1681  * most_register_interface - registers an interface with core
1682  * @iface: pointer to the instance of the interface description.
1683  *
1684  * Allocates and initializes a new interface instance and all of its channels.
1685  * Returns a pointer to kobject or an error pointer.
1686  */
1687 struct kobject *most_register_interface(struct most_interface *iface)
1688 {
1689         unsigned int i;
1690         int id;
1691         char name[STRING_SIZE];
1692         char channel_name[STRING_SIZE];
1693         struct most_c_obj *c;
1694         struct most_inst_obj *inst;
1695
1696         if (!iface || !iface->enqueue || !iface->configure ||
1697             !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
1698                 pr_err("Bad interface or channel overflow\n");
1699                 return ERR_PTR(-EINVAL);
1700         }
1701
1702         id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
1703         if (id < 0) {
1704                 pr_info("Failed to alloc mdev ID\n");
1705                 return ERR_PTR(id);
1706         }
1707         snprintf(name, STRING_SIZE, "mdev%d", id);
1708
1709         inst = create_most_inst_obj(name);
1710         if (!inst) {
1711                 pr_info("Failed to allocate interface instance\n");
1712                 ida_simple_remove(&mdev_id, id);
1713                 return ERR_PTR(-ENOMEM);
1714         }
1715
1716         iface->priv = inst;
1717         INIT_LIST_HEAD(&inst->channel_list);
1718         inst->iface = iface;
1719         inst->dev_id = id;
1720         list_add_tail(&inst->list, &instance_list);
1721
1722         for (i = 0; i < iface->num_channels; i++) {
1723                 const char *name_suffix = iface->channel_vector[i].name_suffix;
1724
1725                 if (!name_suffix)
1726                         snprintf(channel_name, STRING_SIZE, "ch%d", i);
1727                 else if (name_suffix[0] == '@')
1728                         snprintf(channel_name, STRING_SIZE, "ch%d%s", i,
1729                                  name_suffix);
1730                 else
1731                         snprintf(channel_name, STRING_SIZE, "%s", name_suffix);
1732
1733                 /* this increments the reference count of this instance */
1734                 c = create_most_c_obj(channel_name, &inst->kobj);
1735                 if (!c)
1736                         goto free_instance;
1737                 inst->channel[i] = c;
1738                 c->is_starving = 0;
1739                 c->iface = iface;
1740                 c->inst = inst;
1741                 c->channel_id = i;
1742                 c->keep_mbo = false;
1743                 c->enqueue_halt = false;
1744                 c->is_poisoned = false;
1745                 c->cfg.direction = 0;
1746                 c->cfg.data_type = 0;
1747                 c->cfg.num_buffers = 0;
1748                 c->cfg.buffer_size = 0;
1749                 c->cfg.subbuffer_size = 0;
1750                 c->cfg.packets_per_xact = 0;
1751                 spin_lock_init(&c->fifo_lock);
1752                 INIT_LIST_HEAD(&c->fifo);
1753                 INIT_LIST_HEAD(&c->trash_fifo);
1754                 INIT_LIST_HEAD(&c->halt_fifo);
1755                 init_completion(&c->cleanup);
1756                 atomic_set(&c->mbo_ref, 0);
1757                 mutex_init(&c->start_mutex);
1758                 list_add_tail(&c->list, &inst->channel_list);
1759         }
1760         pr_info("registered new MOST device mdev%d (%s)\n",
1761                 inst->dev_id, iface->description);
1762         return &inst->kobj;
1763
1764 free_instance:
1765         pr_info("Failed to allocate channel(s)\n");
1766         list_del(&inst->list);
1767         ida_simple_remove(&mdev_id, id);
1768         destroy_most_inst_obj(inst);
1769         return ERR_PTR(-ENOMEM);
1770 }
1771 EXPORT_SYMBOL_GPL(most_register_interface);
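
/*
 * Example (illustrative sketch, not part of this driver): an HDM
 * describes its hardware in a struct most_interface plus a channel
 * capability array and registers it from its probe routine. All names
 * below (my_channels, my_enqueue, my_configure, my_poison,
 * my_hdm_probe) are hypothetical, and the element type of
 * channel_vector is the capability structure declared in mostcore.h.
 *
 *	static struct most_interface my_iface = {
 *		.description = "my-hdm",
 *		.mod = THIS_MODULE,
 *		.num_channels = ARRAY_SIZE(my_channels),
 *		.channel_vector = my_channels,
 *		.enqueue = my_enqueue,
 *		.configure = my_configure,
 *		.poison_channel = my_poison,
 *	};
 *
 *	static int my_hdm_probe(...)
 *	{
 *		struct kobject *kobj = most_register_interface(&my_iface);
 *
 *		return PTR_ERR_OR_ZERO(kobj);
 *	}
 */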
1772
1773 /**
1774  * most_deregister_interface - deregisters an interface with core
1775  * @iface: pointer to the interface instance description.
1776  *
1777  * Before removing an interface instance from the list, all running
1778  * channels are stopped and poisoned.
1779  */
1780 void most_deregister_interface(struct most_interface *iface)
1781 {
1782         struct most_inst_obj *i = iface->priv;
1783         struct most_c_obj *c;
1784
1785         if (unlikely(!i)) {
1786                 pr_info("Bad Interface\n");
1787                 return;
1788         }
1789         pr_info("deregistering MOST device %s (%s)\n", i->kobj.name,
1790                 iface->description);
1791
1792         list_for_each_entry(c, &i->channel_list, list) {
1793                 if (c->aim0.ptr)
1794                         c->aim0.ptr->disconnect_channel(c->iface,
1795                                                         c->channel_id);
1796                 if (c->aim1.ptr)
1797                         c->aim1.ptr->disconnect_channel(c->iface,
1798                                                         c->channel_id);
1799                 c->aim0.ptr = NULL;
1800                 c->aim1.ptr = NULL;
1801         }
1802
1803         ida_simple_remove(&mdev_id, i->dev_id);
1804         list_del(&i->list);
1805         destroy_most_inst_obj(i);
1806 }
1807 EXPORT_SYMBOL_GPL(most_deregister_interface);
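
/*
 * Example (illustrative sketch, not part of this driver): the HDM's
 * remove path mirrors the probe shown above. Channels still linked to
 * AIMs are disconnected by the core, so the HDM need not stop them
 * one by one.
 *
 *	static void my_hdm_remove(...)
 *	{
 *		most_deregister_interface(&my_iface);
 *	}
 */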
1808
1809 /**
1810  * most_stop_enqueue - prevents core from enqueueing MBOs
1811  * @iface: pointer to interface
1812  * @id: channel id
1813  *
1814  * This is called by an HDM that _cannot_ attend to its duties and
1815  * would otherwise be overrun by the core. The core will not
1816  * enqueue any further packets unless the flagging HDM calls
1817  * most_resume_enqueue().
1818  */
1819 void most_stop_enqueue(struct most_interface *iface, int id)
1820 {
1821         struct most_c_obj *c = get_channel_by_iface(iface, id);
1822
1823         if (likely(c))
1824                 c->enqueue_halt = true;
1825 }
1826 EXPORT_SYMBOL_GPL(most_stop_enqueue);
1827
1828 /**
1829  * most_resume_enqueue - allow core to enqueue MBOs again
1830  * @iface: pointer to interface
1831  * @id: channel id
1832  *
1833  * This clears the enqueue halt flag and enqueues all MBOs currently
1834  * sitting in the wait fifo.
1835  */
1836 void most_resume_enqueue(struct most_interface *iface, int id)
1837 {
1838         struct most_c_obj *c = get_channel_by_iface(iface, id);
1839
1840         if (unlikely(!c))
1841                 return;
1842         c->enqueue_halt = false;
1843
1844         wake_up_interruptible(&c->hdm_fifo_wq);
1845 }
1846 EXPORT_SYMBOL_GPL(most_resume_enqueue);
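
/*
 * Example (illustrative sketch, not part of this driver): an HDM uses
 * this pair for flow control. It halts the core while its hardware
 * FIFO is full and resumes enqueueing once the FIFO has drained;
 * my_fifo_full(), my_fifo_drained(), hw and ch are hypothetical.
 *
 *	if (my_fifo_full(hw))
 *		most_stop_enqueue(&my_iface, ch);
 *	...
 *	if (my_fifo_drained(hw))
 *		most_resume_enqueue(&my_iface, ch);
 */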
1847
1848 static int __init most_init(void)
1849 {
1850         pr_info("init()\n");
1851         INIT_LIST_HEAD(&instance_list);
1852         INIT_LIST_HEAD(&aim_list);
1853         ida_init(&mdev_id);
1854
1855         if (bus_register(&most_bus)) {
1856                 pr_info("Cannot register most bus\n");
1857                 goto exit;
1858         }
1859
1860         most_class = class_create(THIS_MODULE, "most");
1861         if (IS_ERR(most_class)) {
1862                 pr_info("Cannot create most class\n");
1863                 goto exit_bus;
1864         }
1865         if (driver_register(&mostcore)) {
1866                 pr_info("Cannot register core driver\n");
1867                 goto exit_class;
1868         }
1869
1870         class_glue_dir =
1871                 device_create(most_class, NULL, 0, NULL, "mostcore");
1872         if (IS_ERR(class_glue_dir))    /* device_create() returns ERR_PTR on failure */
1873                 goto exit_driver;
1874
1875         most_aim_kset =
1876                 kset_create_and_add("aims", NULL, &class_glue_dir->kobj);
1877         if (!most_aim_kset)
1878                 goto exit_class_container;
1879
1880         most_inst_kset =
1881                 kset_create_and_add("devices", NULL, &class_glue_dir->kobj);
1882         if (!most_inst_kset)
1883                 goto exit_driver_kset;
1884
1885         return 0;
1886
1887 exit_driver_kset:
1888         kset_unregister(most_aim_kset);
1889 exit_class_container:
1890         device_destroy(most_class, 0);
1891 exit_driver:
1892         driver_unregister(&mostcore);
1893 exit_class:
1894         class_destroy(most_class);
1895 exit_bus:
1896         bus_unregister(&most_bus);
1897 exit:
1898         return -ENOMEM;
1899 }
1900
1901 static void __exit most_exit(void)
1902 {
1903         struct most_inst_obj *i, *i_tmp;
1904         struct most_aim_obj *d, *d_tmp;
1905
1906         pr_info("exit core module\n");
1907         list_for_each_entry_safe(d, d_tmp, &aim_list, list)
1908                 destroy_most_aim_obj(d);
1910
1911         list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
1912                 list_del(&i->list);
1913                 destroy_most_inst_obj(i);
1914         }
1915         kset_unregister(most_inst_kset);
1916         kset_unregister(most_aim_kset);
1917         device_destroy(most_class, 0);
1918         driver_unregister(&mostcore);
1919         class_destroy(most_class);
1920         bus_unregister(&most_bus);
1921         ida_destroy(&mdev_id);
1922 }
1923
1924 module_init(most_init);
1925 module_exit(most_exit);
1926 MODULE_LICENSE("GPL");
1927 MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
1928 MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");