f58d6d8891121457122217dd59d08c08b470b883
[AGL/meta-agl-devel.git] /
1 From 9ffe778acc541cec68c954f84c6fcfef8a35bec2 Mon Sep 17 00:00:00 2001
2 From: Igor Skalkin <igor.skalkin@opensynergy.com>
3 Date: Thu, 5 Nov 2020 22:21:09 +0100
4 Subject: [PATCH] firmware: arm_scmi: Add op to override max message #
5
6 The number of messages that the upcoming scmi-virtio transport can
7 support depends on the virtio device (SCMI platform) and can differ for
8 each channel. (The scmi-virtio transport has only one tx channel and at
9 most one rx channel.)
10
11 Add an optional transport op so that scmi-virtio can report the actual
12 max message # for each channel type. Respect these new limits.
13
14 Co-developed-by: Peter Hilber <peter.hilber@opensynergy.com>
15 Signed-off-by: Peter Hilber <peter.hilber@opensynergy.com>
16 Signed-off-by: Igor Skalkin <igor.skalkin@opensynergy.com>
17 Signed-off-by: Vasyl Vavrychuk <vasyl.vavrychuk@opensynergy.com>
18 ---
19  drivers/firmware/arm_scmi/common.h |  8 ++++-
20  drivers/firmware/arm_scmi/driver.c | 49 ++++++++++++++++++++++--------
21  2 files changed, 43 insertions(+), 14 deletions(-)
22
23 diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
24 index 38e6aabbe3dd..9a8359ecd220 100644
25 --- a/drivers/firmware/arm_scmi/common.h
26 +++ b/drivers/firmware/arm_scmi/common.h
27 @@ -203,6 +203,9 @@ struct scmi_chan_info {
28   * @chan_available: Callback to check if channel is available or not
29   * @chan_setup: Callback to allocate and setup a channel
30   * @chan_free: Callback to free a channel
31 + * @get_max_msg: Optional callback to provide max_msg dynamically
32 + *     @max_msg: Maximum number of messages for the channel type (tx or rx)
33 + *             that can be pending simultaneously in the system
34   * @send_message: Callback to send a message
35   * @mark_txdone: Callback to mark tx as done
36   * @fetch_response: Callback to fetch response
37 @@ -215,6 +218,8 @@ struct scmi_transport_ops {
38         int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev,
39                           bool tx);
40         int (*chan_free)(int id, void *p, void *data);
41 +       int (*get_max_msg)(bool tx, struct scmi_chan_info *base_cinfo,
42 +                          int *max_msg);
43         int (*send_message)(struct scmi_chan_info *cinfo,
44                             struct scmi_xfer *xfer);
45         void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret);
46 @@ -232,7 +237,8 @@ struct scmi_transport_ops {
47   * @ops: Pointer to the transport specific ops structure
48   * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
49   * @max_msg: Maximum number of messages for a channel type (tx or rx) that can
50 - *     be pending simultaneously in the system
51 + *     be pending simultaneously in the system. May be overridden by the
52 + *     get_max_msg op.
53   * @max_msg_size: Maximum size of data per message that can be handled.
54   */
55  struct scmi_desc {
56 diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
57 index 7efbf66f117b..5baa23789a49 100644
58 --- a/drivers/firmware/arm_scmi/driver.c
59 +++ b/drivers/firmware/arm_scmi/driver.c
60 @@ -61,11 +61,13 @@ static atomic_t transfer_last_id;
61   *     Index of this bitmap table is also used for message
62   *     sequence identifier.
63   * @xfer_lock: Protection for message allocation
64 + * @max_msg: Maximum number of messages that can be pending
65   */
66  struct scmi_xfers_info {
67         struct scmi_xfer *xfer_block;
68         unsigned long *xfer_alloc_table;
69         spinlock_t xfer_lock;
70 +       int max_msg;
71  };
72  
73  /**
74 @@ -157,13 +159,11 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
75         u16 xfer_id;
76         struct scmi_xfer *xfer;
77         unsigned long flags, bit_pos;
78 -       struct scmi_info *info = handle_to_scmi_info(handle);
79  
80         /* Keep the locked section as small as possible */
81         spin_lock_irqsave(&minfo->xfer_lock, flags);
82 -       bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
83 -                                     info->desc->max_msg);
84 -       if (bit_pos == info->desc->max_msg) {
85 +       bit_pos = find_first_zero_bit(minfo->xfer_alloc_table, minfo->max_msg);
86 +       if (bit_pos == minfo->max_msg) {
87                 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
88                 return ERR_PTR(-ENOMEM);
89         }
90 @@ -602,32 +602,44 @@ int scmi_handle_put(const struct scmi_handle *handle)
91  }
92  
93  static int __scmi_xfer_info_init(struct scmi_info *sinfo,
94 -                                struct scmi_xfers_info *info)
95 +                                struct scmi_xfers_info *info,
96 +                                bool tx,
97 +                                struct scmi_chan_info *base_cinfo)
98  {
99         int i;
100         struct scmi_xfer *xfer;
101         struct device *dev = sinfo->dev;
102         const struct scmi_desc *desc = sinfo->desc;
103  
104 +       info->max_msg = desc->max_msg;
105 +
106 +       if (desc->ops->get_max_msg) {
107 +               int ret =
108 +                       desc->ops->get_max_msg(tx, base_cinfo, &info->max_msg);
109 +
110 +               if (ret)
111 +                       return ret;
112 +       }
113 +
114         /* Pre-allocated messages, no more than what hdr.seq can support */
115 -       if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
116 +       if (WARN_ON(info->max_msg >= MSG_TOKEN_MAX)) {
117                 dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
118 -                       desc->max_msg, MSG_TOKEN_MAX);
119 +                       info->max_msg, MSG_TOKEN_MAX);
120                 return -EINVAL;
121         }
122  
123 -       info->xfer_block = devm_kcalloc(dev, desc->max_msg,
124 +       info->xfer_block = devm_kcalloc(dev, info->max_msg,
125                                         sizeof(*info->xfer_block), GFP_KERNEL);
126         if (!info->xfer_block)
127                 return -ENOMEM;
128  
129 -       info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
130 +       info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(info->max_msg),
131                                               sizeof(long), GFP_KERNEL);
132         if (!info->xfer_alloc_table)
133                 return -ENOMEM;
134  
135         /* Pre-initialize the buffer pointer to pre-allocated buffers */
136 -       for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
137 +       for (i = 0, xfer = info->xfer_block; i < info->max_msg; i++, xfer++) {
138                 xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
139                                             GFP_KERNEL);
140                 if (!xfer->rx.buf)
141 @@ -644,10 +656,21 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo,
142  
143  static int scmi_xfer_info_init(struct scmi_info *sinfo)
144  {
145 -       int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
146 +       int ret;
147 +       struct scmi_chan_info *base_tx_cinfo;
148 +       struct scmi_chan_info *base_rx_cinfo;
149 +
150 +       base_tx_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
151 +       if (unlikely(!base_tx_cinfo))
152 +               return -EINVAL;
153 +
154 +       ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo, true,
155 +                                   base_tx_cinfo);
156  
157 -       if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
158 -               ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
159 +       base_rx_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
160 +       if (!ret && base_rx_cinfo)
161 +               ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo, false,
162 +                                           base_rx_cinfo);
163  
164         return ret;
165  }