From fa0e2362149bb814d6b7431a7c42989d33002f60 Mon Sep 17 00:00:00 2001
From: Mans Rullgard <mans@mansr.com>
Date: Thu, 26 May 2016 16:04:02 +0100
Subject: [PATCH 13/32] ivshmem-net: virtual network device for Jailhouse

---
 drivers/net/Kconfig       |   4 +
 drivers/net/Makefile      |   2 +
 drivers/net/ivshmem-net.c | 923 ++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 929 insertions(+)
 create mode 100644 drivers/net/ivshmem-net.c

diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index df1c7989e13d..8c65f55163e3 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -527,4 +527,8 @@ config NET_FAILOVER
 	  a VM with direct attached VF by failing over to the paravirtual
 	  datapath when the VF is unplugged.
 
+config IVSHMEM_NET
+	tristate "IVSHMEM virtual network device"
27 diff --git a/drivers/net/Makefile b/drivers/net/Makefile
28 index 0d3ba056cda3..5041c293d4d0 100644
29 --- a/drivers/net/Makefile
30 +++ b/drivers/net/Makefile
@@ -79,3 +79,5 @@ thunderbolt-net-y += thunderbolt.o
 obj-$(CONFIG_THUNDERBOLT_NET) += thunderbolt-net.o
 obj-$(CONFIG_NETDEVSIM) += netdevsim/
 obj-$(CONFIG_NET_FAILOVER) += net_failover.o
+obj-$(CONFIG_IVSHMEM_NET) += ivshmem-net.o
diff --git a/drivers/net/ivshmem-net.c b/drivers/net/ivshmem-net.c
new file mode 100644
index 000000000000..b676bed2cc2e
--- /dev/null
+++ b/drivers/net/ivshmem-net.c
@@ -0,0 +1,923 @@
+/*
+ * Copyright 2016 Mans Rullgard <mans@mansr.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/virtio_ring.h>
+
+#define DRV_NAME "ivshmem-net"
+
+#define JAILHOUSE_CFG_SHMEM_PTR 0x40
+#define JAILHOUSE_CFG_SHMEM_SZ  0x48
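+
+/*
+ * The offsets above are Jailhouse-specific: when BAR 2 does not expose
+ * the shared memory region, its address and size are published in
+ * these config-space locations instead (see ivshm_net_probe()).
+ */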
+
+#define IVSHM_NET_STATE_RESET   0
+#define IVSHM_NET_STATE_INIT    1
+#define IVSHM_NET_STATE_READY   2
+#define IVSHM_NET_STATE_RUN     3
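+
+/*
+ * Both sides publish their own link state in the lstate register and
+ * read the peer's through rstate.  The values are ordered, so the
+ * state machine in ivshm_net_state_change() can advance or fall back
+ * using simple numeric comparisons.
+ */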
+
+#define IVSHM_NET_MTU_MIN       256
+#define IVSHM_NET_MTU_MAX       65535
+#define IVSHM_NET_MTU_DEF       16384
+
+#define IVSHM_NET_FRAME_SIZE(s) ALIGN(18 + (s), SMP_CACHE_BYTES)
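+
+/*
+ * Frames in shared memory are padded to a full cache line; the 18
+ * bytes of overhead in IVSHM_NET_FRAME_SIZE() leave room for the
+ * 14-byte Ethernet header plus, presumably, a 4-byte VLAN tag.
+ */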
+
+#define IVSHM_NET_VQ_ALIGN      64
+
+struct ivshmem_regs {
+        u32 intmask;
+        u32 intstatus;
+        u32 ivpos;
+        u32 doorbell;
+        u32 lstate;
+        u32 rstate;
+};
+
+struct ivshm_net_queue {
+        struct vring vr;
+        u32 free_head;
+        u32 num_free;
+        u32 num_added;
+        u16 last_avail_idx;
+        u16 last_used_idx;
+
+        void *data;
+        void *end;
+        u32 size;
+        u32 head;
+        u32 tail;
+};
+
+struct ivshm_net_stats {
+        u32 interrupts;
+        u32 tx_packets;
+        u32 tx_notify;
+        u32 tx_pause;
+        u32 rx_packets;
+        u32 rx_notify;
+        u32 napi_poll;
+        u32 napi_complete;
+        u32 napi_poll_n[10];
+};
+
+struct ivshm_net {
+        struct ivshm_net_queue rx;
+        struct ivshm_net_queue tx;
+
+        u32 vrsize;
+        u32 qlen;
+        u32 qsize;
+
+        spinlock_t tx_free_lock;
+        spinlock_t tx_clean_lock;
+
+        struct napi_struct napi;
+
+        u32 lstate;
+        u32 rstate;
+
+        struct workqueue_struct *state_wq;
+        struct work_struct state_work;
+
+        struct ivshm_net_stats stats;
+
+        struct ivshmem_regs __iomem *ivshm_regs;
+        void *shm;
+        phys_addr_t shmaddr;
+        resource_size_t shmlen;
+        u32 peer_id;
+
+        struct pci_dev *pdev;
+        struct msix_entry msix;
+        bool using_msix;
+};
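+
+/*
+ * Translate a descriptor published by the peer into a local pointer.
+ * The peer is not trusted: the address must fall inside the shared
+ * memory region and inside this queue's data area, and the length must
+ * not reach past the end of the data area, or the descriptor is
+ * rejected by returning NULL.
+ */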
+static void *ivshm_net_desc_data(struct ivshm_net *in,
+                                 struct ivshm_net_queue *q,
+                                 struct vring_desc *desc,
+                                 u32 *len)
+{
+        u64 addr = READ_ONCE(desc->addr);
+        u32 dlen = READ_ONCE(desc->len);
+        void *data;
+
+        if (addr < in->shmaddr || addr > in->shmaddr + in->shmlen)
+                return NULL;
+
+        data = in->shm + (addr - in->shmaddr);
+
+        if (data < q->data || data >= q->end)
+                return NULL;
+
+        if (dlen > q->end - data)
+                return NULL;
+
+        *len = dlen;
+
+        return data;
+}
+
+static void ivshm_net_init_queue(struct ivshm_net *in,
+                                 struct ivshm_net_queue *q,
+                                 void *mem, unsigned int len)
+{
+        memset(q, 0, sizeof(*q));
+
+        vring_init(&q->vr, len, mem, IVSHM_NET_VQ_ALIGN);
+        q->data = mem + in->vrsize;
+        q->end = q->data + in->qsize;
+        q->size = in->qsize;
+}
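+
+/*
+ * Lay out both vrings in shared memory.  Each side owns one half of
+ * the region, selected by its IVPosition, and places its TX ring
+ * there.  Since the used ring is written by the consumer, the RX and
+ * TX used-ring pointers are swapped afterwards: that way each side
+ * only ever writes to its own half of the shared memory.
+ */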
+static void ivshm_net_init_queues(struct net_device *ndev)
+{
+        struct ivshm_net *in = netdev_priv(ndev);
+        int ivpos = readl(&in->ivshm_regs->ivpos);
+        void *tx;
+        void *rx;
+        int i;
+
+        tx = in->shm +  ivpos * in->shmlen / 2;
+        rx = in->shm + !ivpos * in->shmlen / 2;
+
+        memset(tx, 0, in->shmlen / 2);
+
+        ivshm_net_init_queue(in, &in->rx, rx, in->qlen);
+        ivshm_net_init_queue(in, &in->tx, tx, in->qlen);
+
+        swap(in->rx.vr.used, in->tx.vr.used);
+
+        in->tx.num_free = in->tx.vr.num;
+
+        for (i = 0; i < in->tx.vr.num - 1; i++)
+                in->tx.vr.desc[i].next = i + 1;
+}
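+
+/*
+ * Size the rings to fit the shared memory: start at 4096 descriptors
+ * and halve until the vring metadata takes less than 1/16 of the
+ * region.  The rest of each half becomes the frame data area, which
+ * must hold at least four minimum-MTU frames.
+ */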
+static int ivshm_net_calc_qsize(struct net_device *ndev)
+{
+        struct ivshm_net *in = netdev_priv(ndev);
+        unsigned int vrsize;
+        unsigned int qsize;
+        unsigned int qlen;
+
+        for (qlen = 4096; qlen > 32; qlen >>= 1) {
+                vrsize = vring_size(qlen, IVSHM_NET_VQ_ALIGN);
+                vrsize = ALIGN(vrsize, IVSHM_NET_VQ_ALIGN);
+                if (vrsize < in->shmlen / 16)
+                        break;
+        }
+
+        if (vrsize > in->shmlen / 2)
+                return -EINVAL;
+
+        qsize = in->shmlen / 2 - vrsize;
+
+        if (qsize < 4 * IVSHM_NET_MTU_MIN)
+                return -EINVAL;
+
+        in->vrsize = vrsize;
+        in->qsize = qsize;
+        in->qlen = qlen;
+
+        return 0;
+}
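+
+/*
+ * Doorbell suppression via the virtio event index mechanism: the
+ * consumer publishes the ring index it wants to be interrupted at, and
+ * vring_need_event() tells the producer whether that index has just
+ * been crossed, so most frames are queued without ringing the peer.
+ */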
+static void ivshm_net_notify_tx(struct ivshm_net *in, unsigned int num)
+{
+        u16 evt, old, new;
+
+        evt = READ_ONCE(vring_avail_event(&in->tx.vr));
+        old = in->tx.last_avail_idx - num;
+        new = in->tx.last_avail_idx;
+
+        if (vring_need_event(evt, new, old)) {
+                writel(in->peer_id << 16, &in->ivshm_regs->doorbell);
+                in->stats.tx_notify++;
+        }
+}
+
+static void ivshm_net_enable_rx_irq(struct ivshm_net *in)
+{
+        vring_avail_event(&in->rx.vr) = in->rx.last_avail_idx;
+}
+
+static void ivshm_net_notify_rx(struct ivshm_net *in, unsigned int num)
+{
+        u16 evt, old, new;
+
+        evt = READ_ONCE(vring_used_event(&in->rx.vr));
+        old = in->rx.last_used_idx - num;
+        new = in->rx.last_used_idx;
+
+        if (vring_need_event(evt, new, old)) {
+                writel(in->peer_id << 16, &in->ivshm_regs->doorbell);
+                in->stats.rx_notify++;
+        }
+}
+
+static void ivshm_net_enable_tx_irq(struct ivshm_net *in)
+{
+        vring_used_event(&in->tx.vr) = in->tx.last_used_idx;
+}
+
+static bool ivshm_net_rx_avail(struct ivshm_net *in)
+{
+        return READ_ONCE(in->rx.vr.avail->idx) != in->rx.last_avail_idx;
+}
+
+static size_t ivshm_net_tx_space(struct ivshm_net *in)
+{
+        struct ivshm_net_queue *tx = &in->tx;
+        u32 tail = tx->tail;
+        u32 head = tx->head;
+        u32 space;
+
+        if (head < tail)
+                space = tail - head;
+        else
+                space = max(tx->size - head, tail);
+
+        return space;
+}
+
+static bool ivshm_net_tx_ok(struct ivshm_net *in, unsigned int mtu)
+{
+        return in->tx.num_free >= 2 &&
+                ivshm_net_tx_space(in) >= 2 * IVSHM_NET_FRAME_SIZE(mtu);
+}
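+
+/*
+ * The data area is used as a ring of contiguous frames.  If the next
+ * frame does not fit between the current position and the end of the
+ * area, it is placed at the start instead, so a frame never wraps.
+ */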
+static u32 ivshm_net_tx_advance(struct ivshm_net_queue *q, u32 *pos, u32 len)
+{
+        u32 p = *pos;
+
+        len = IVSHM_NET_FRAME_SIZE(len);
+
+        if (q->size - p < len)
+                p = 0;
+
+        *pos = p + len;
+
+        return p;
+}
+
+static int ivshm_net_tx_frame(struct net_device *ndev, struct sk_buff *skb)
+{
+        struct ivshm_net *in = netdev_priv(ndev);
+        struct ivshm_net_queue *tx = &in->tx;
+        struct vring *vr = &tx->vr;
+        struct vring_desc *desc;
+        unsigned int desc_idx;
+        unsigned int avail;
+        u32 head;
+        void *buf;
+
+        BUG_ON(tx->num_free < 1);
+
+        spin_lock(&in->tx_free_lock);
+        desc_idx = tx->free_head;
+        desc = &vr->desc[desc_idx];
+        tx->free_head = desc->next;
+        tx->num_free--;
+        spin_unlock(&in->tx_free_lock);
+
+        head = ivshm_net_tx_advance(tx, &tx->head, skb->len);
+        buf = tx->data + head;
+        skb_copy_and_csum_dev(skb, buf);
+
+        desc->addr = in->shmaddr + (buf - in->shm);
+        desc->len = skb->len;
+
+        avail = tx->last_avail_idx++ & (vr->num - 1);
+        vr->avail->ring[avail] = desc_idx;
+        tx->num_added++;
+
+        if (!skb->xmit_more) {
+                virt_store_release(&vr->avail->idx, tx->last_avail_idx);
+                ivshm_net_notify_tx(in, tx->num_added);
+                tx->num_added = 0;
+        }
+
+        return 0;
+}
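+
+/*
+ * Reclaim descriptors the peer has marked used.  This runs from both
+ * the xmit path and NAPI poll, hence the trylock: if the other path is
+ * already cleaning there is nothing left to do here.  Reclaimed
+ * descriptors are chained locally and spliced onto the free list under
+ * tx_free_lock in a single operation.
+ */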
+static void ivshm_net_tx_clean(struct net_device *ndev)
+{
+        struct ivshm_net *in = netdev_priv(ndev);
+        struct ivshm_net_queue *tx = &in->tx;
+        struct vring *vr = &tx->vr;
+        struct vring_desc *desc;
+        struct vring_desc *fdesc;
+        unsigned int num = 0;
+        u16 used_idx;
+        u16 last;
+        u32 fhead = 0;
+
+        if (!spin_trylock(&in->tx_clean_lock))
+                return;
+
+        used_idx = virt_load_acquire(&vr->used->idx);
+        last = tx->last_used_idx;
+
+        while (last != used_idx) {
+                void *data;
+                u32 len;
+                u32 tail;
+                u16 used;
+
+                used = vr->used->ring[last & (vr->num - 1)].id;
+                if (used >= vr->num) {
+                        netdev_err(ndev, "invalid tx used %d\n", used);
+                        break;
+                }
+
+                desc = &vr->desc[used];
+
+                data = ivshm_net_desc_data(in, &in->tx, desc, &len);
+                if (!data) {
+                        netdev_err(ndev, "bad tx descriptor\n");
+                        break;
+                }
+
+                tail = ivshm_net_tx_advance(tx, &tx->tail, len);
+                if (data != tx->data + tail) {
+                        netdev_err(ndev, "bad tx descriptor\n");
+                        break;
+                }
+
+                if (!num)
+                        fdesc = desc;
+                else
+                        desc->next = fhead;
+
+                fhead = used;
+                num++;
+                last++;
+        }
+
+        tx->last_used_idx = last;
+
+        spin_unlock(&in->tx_clean_lock);
+
+        if (num) {
+                spin_lock(&in->tx_free_lock);
+                fdesc->next = tx->free_head;
+                tx->free_head = fhead;
+                tx->num_free += num;
+                BUG_ON(tx->num_free > vr->num);
+                spin_unlock(&in->tx_free_lock);
+        }
+}
+
+static struct vring_desc *ivshm_net_rx_desc(struct net_device *ndev)
+{
+        struct ivshm_net *in = netdev_priv(ndev);
+        struct ivshm_net_queue *rx = &in->rx;
+        struct vring *vr = &rx->vr;
+        unsigned int avail;
+        u16 avail_idx;
+
+        avail_idx = virt_load_acquire(&vr->avail->idx);
+
+        if (avail_idx == rx->last_avail_idx)
+                return NULL;
+
+        avail = vr->avail->ring[rx->last_avail_idx++ & (vr->num - 1)];
+        if (avail >= vr->num) {
+                netdev_err(ndev, "invalid rx avail %d\n", avail);
+                return NULL;
+        }
+
+        return &vr->desc[avail];
+}
+
+static void ivshm_net_rx_finish(struct ivshm_net *in, struct vring_desc *desc)
+{
+        struct ivshm_net_queue *rx = &in->rx;
+        struct vring *vr = &rx->vr;
+        unsigned int desc_id = desc - vr->desc;
+        unsigned int used;
+
+        used = rx->last_used_idx++ & (vr->num - 1);
+        vr->used->ring[used].id = desc_id;
+
+        virt_store_release(&vr->used->idx, rx->last_used_idx);
+}
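+
+/*
+ * NAPI poll: reclaim finished TX descriptors, then receive up to
+ * budget frames.  On completion the RX interrupt is re-enabled and the
+ * avail ring is checked once more, rescheduling if a frame arrived in
+ * between and its notification was missed.
+ */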
+static int ivshm_net_poll(struct napi_struct *napi, int budget)
+{
+        struct net_device *ndev = napi->dev;
+        struct ivshm_net *in = container_of(napi, struct ivshm_net, napi);
+        int received = 0;
+
+        in->stats.napi_poll++;
+
+        ivshm_net_tx_clean(ndev);
+
+        while (received < budget) {
+                struct vring_desc *desc;
+                struct sk_buff *skb;
+                void *data;
+                u32 len;
+
+                desc = ivshm_net_rx_desc(ndev);
+                if (!desc)
+                        break;
+
+                data = ivshm_net_desc_data(in, &in->rx, desc, &len);
+                if (!data) {
+                        netdev_err(ndev, "bad rx descriptor\n");
+                        break;
+                }
+
+                skb = napi_alloc_skb(napi, len);
+                if (!skb)
+                        break;
+
+                memcpy(skb_put(skb, len), data, len);
+                skb->protocol = eth_type_trans(skb, ndev);
+                napi_gro_receive(napi, skb);
+
+                ndev->stats.rx_packets++;
+                ndev->stats.rx_bytes += len;
+
+                ivshm_net_rx_finish(in, desc);
+                received++;
+        }
+
+        if (received < budget) {
+                in->stats.napi_complete++;
+                napi_complete_done(napi, received);
+                ivshm_net_enable_rx_irq(in);
+                if (ivshm_net_rx_avail(in))
+                        napi_schedule(napi);
+        }
+
+        ivshm_net_notify_rx(in, received);
+
+        in->stats.rx_packets += received;
+        in->stats.napi_poll_n[received ? 1 + min(ilog2(received), 8) : 0]++;
+
+        if (ivshm_net_tx_ok(in, ndev->mtu))
+                netif_wake_queue(ndev);
+
+        return received;
+}
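+
+/*
+ * ivshm_net_tx_ok() guarantees room for two more frames, so even when
+ * the queue is stopped below, the current skb can still be sent;
+ * clearing xmit_more forces it to be published and the peer notified.
+ */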
+static netdev_tx_t ivshm_net_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+        struct ivshm_net *in = netdev_priv(ndev);
+
+        ivshm_net_tx_clean(ndev);
+
+        if (!ivshm_net_tx_ok(in, ndev->mtu)) {
+                ivshm_net_enable_tx_irq(in);
+                netif_stop_queue(ndev);
+                skb->xmit_more = 0;
+                in->stats.tx_pause++;
+        }
+
+        ivshm_net_tx_frame(ndev, skb);
+
+        in->stats.tx_packets++;
+        ndev->stats.tx_packets++;
+        ndev->stats.tx_bytes += skb->len;
+
+        dev_consume_skb_any(skb);
+
+        return NETDEV_TX_OK;
+}
+
+static void ivshm_net_set_state(struct ivshm_net *in, u32 state)
+{
+        WRITE_ONCE(in->lstate, state);
+        writel(state, &in->ivshm_regs->lstate);
+}
+
+static void ivshm_net_run(struct net_device *ndev)
+{
+        struct ivshm_net *in = netdev_priv(ndev);
+
+        netif_start_queue(ndev);
+        napi_enable(&in->napi);
+        napi_schedule(&in->napi);
+        ivshm_net_set_state(in, IVSHM_NET_STATE_RUN);
+}
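+
+/*
+ * Drive the local state machine from the peer's state: leave RESET for
+ * INIT while the peer is not yet READY, set up the queues and go READY
+ * once the peer has left RESET, start the device when both sides are
+ * READY, and drop back to RESET whenever the peer resets.
+ */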
+static void ivshm_net_state_change(struct work_struct *work)
+{
+        struct ivshm_net *in = container_of(work, struct ivshm_net, state_work);
+        struct net_device *ndev = in->napi.dev;
+        u32 rstate = readl(&in->ivshm_regs->rstate);
+
+        switch (in->lstate) {
+        case IVSHM_NET_STATE_RESET:
+                if (rstate < IVSHM_NET_STATE_READY)
+                        ivshm_net_set_state(in, IVSHM_NET_STATE_INIT);
+                break;
+
+        case IVSHM_NET_STATE_INIT:
+                if (rstate > IVSHM_NET_STATE_RESET) {
+                        ivshm_net_init_queues(ndev);
+                        ivshm_net_set_state(in, IVSHM_NET_STATE_READY);
+
+                        rtnl_lock();
+                        call_netdevice_notifiers(NETDEV_CHANGEADDR, ndev);
+                        rtnl_unlock();
+                }
+                break;
+
+        case IVSHM_NET_STATE_READY:
+                if (rstate >= IVSHM_NET_STATE_READY) {
+                        netif_carrier_on(ndev);
+                        if (ndev->flags & IFF_UP)
+                                ivshm_net_run(ndev);
+                } else {
+                        netif_carrier_off(ndev);
+                        ivshm_net_set_state(in, IVSHM_NET_STATE_RESET);
+                }
+                break;
+
+        case IVSHM_NET_STATE_RUN:
+                if (rstate < IVSHM_NET_STATE_READY) {
+                        netif_stop_queue(ndev);
+                        napi_disable(&in->napi);
+                        netif_carrier_off(ndev);
+                        ivshm_net_set_state(in, IVSHM_NET_STATE_RESET);
+                }
+                break;
+        }
+
+        WRITE_ONCE(in->rstate, rstate);
+}
+
+static bool ivshm_net_check_state(struct net_device *ndev)
+{
+        struct ivshm_net *in = netdev_priv(ndev);
+        u32 rstate = readl(&in->ivshm_regs->rstate);
+
+        if (rstate != READ_ONCE(in->rstate) ||
+            in->lstate != IVSHM_NET_STATE_RUN) {
+                queue_work(in->state_wq, &in->state_work);
+                return false;
+        }
+
+        return true;
+}
+
+static irqreturn_t ivshm_net_int(int irq, void *data)
+{
+        struct net_device *ndev = data;
+        struct ivshm_net *in = netdev_priv(ndev);
+
+        in->stats.interrupts++;
+
+        ivshm_net_check_state(ndev);
+        napi_schedule_irqoff(&in->napi);
+
+        return IRQ_HANDLED;
+}
+
+static int ivshm_net_open(struct net_device *ndev)
+{
+        struct ivshm_net *in = netdev_priv(ndev);
+
+        netdev_reset_queue(ndev);
+        ndev->operstate = IF_OPER_UP;
+
+        if (in->lstate == IVSHM_NET_STATE_READY)
+                ivshm_net_run(ndev);
+
+        return 0;
+}
+
+static int ivshm_net_stop(struct net_device *ndev)
+{
+        struct ivshm_net *in = netdev_priv(ndev);
+
+        ndev->operstate = IF_OPER_DOWN;
+
+        if (in->lstate == IVSHM_NET_STATE_RUN) {
+                napi_disable(&in->napi);
+                netif_stop_queue(ndev);
+                ivshm_net_set_state(in, IVSHM_NET_STATE_READY);
+        }
+
+        return 0;
+}
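+
+/*
+ * The MTU can only change while the TX ring can accommodate it: the
+ * data area must hold at least four frames of the new size, there must
+ * be room for two in-flight frames, and the head position must not
+ * strand a larger frame at the end of the area (the head is wrapped to
+ * the start under the TX lock when that is safe).
+ */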
+static int ivshm_net_change_mtu(struct net_device *ndev, int mtu)
+{
+        struct ivshm_net *in = netdev_priv(ndev);
+        struct ivshm_net_queue *tx = &in->tx;
+
+        if (mtu < IVSHM_NET_MTU_MIN || mtu > IVSHM_NET_MTU_MAX)
+                return -EINVAL;
+
+        if (in->tx.size / mtu < 4)
+                return -EINVAL;
+
+        if (ivshm_net_tx_space(in) < 2 * IVSHM_NET_FRAME_SIZE(mtu))
+                return -EBUSY;
+
+        if (in->tx.size - tx->head < IVSHM_NET_FRAME_SIZE(mtu) &&
+            tx->head < tx->tail)
+                return -EBUSY;
+
+        netif_tx_lock_bh(ndev);
+        if (in->tx.size - tx->head < IVSHM_NET_FRAME_SIZE(mtu))
+                tx->head = 0;
+        netif_tx_unlock_bh(ndev);
+
+        ndev->mtu = mtu;
+
+        return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void ivshm_net_poll_controller(struct net_device *ndev)
+{
+        struct ivshm_net *in = netdev_priv(ndev);
+
+        napi_schedule(&in->napi);
+}
+#endif
+
+static const struct net_device_ops ivshm_net_ops = {
+        .ndo_open               = ivshm_net_open,
+        .ndo_stop               = ivshm_net_stop,
+        .ndo_start_xmit         = ivshm_net_xmit,
+        .ndo_change_mtu         = ivshm_net_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+        .ndo_poll_controller    = ivshm_net_poll_controller,
+#endif
+};
+
+static const char ivshm_net_stats[][ETH_GSTRING_LEN] = {
+
+#define NUM_STATS ARRAY_SIZE(ivshm_net_stats)
+
+static int ivshm_net_get_sset_count(struct net_device *ndev, int sset)
+{
+        if (sset == ETH_SS_STATS)
+                return NUM_STATS;
+
+        return -EOPNOTSUPP;
+}
+
+static void ivshm_net_get_strings(struct net_device *ndev, u32 sset, u8 *buf)
+{
+        if (sset == ETH_SS_STATS)
+                memcpy(buf, &ivshm_net_stats, sizeof(ivshm_net_stats));
+}
+
+static void ivshm_net_get_ethtool_stats(struct net_device *ndev,
+                                        struct ethtool_stats *estats, u64 *st)
+{
+        struct ivshm_net *in = netdev_priv(ndev);
+        unsigned int n = 0;
+        unsigned int i;
+
+        st[n++] = in->stats.interrupts;
+        st[n++] = in->stats.tx_packets;
+        st[n++] = in->stats.tx_notify;
+        st[n++] = in->stats.tx_pause;
+        st[n++] = in->stats.rx_packets;
+        st[n++] = in->stats.rx_notify;
+        st[n++] = in->stats.napi_poll;
+        st[n++] = in->stats.napi_complete;
+
+        for (i = 0; i < ARRAY_SIZE(in->stats.napi_poll_n); i++)
+                st[n++] = in->stats.napi_poll_n[i];
+
+        memset(&in->stats, 0, sizeof(in->stats));
+}
+
+static const struct ethtool_ops ivshm_net_ethtool_ops = {
+        .get_sset_count         = ivshm_net_get_sset_count,
+        .get_strings            = ivshm_net_get_strings,
+        .get_ethtool_stats      = ivshm_net_get_ethtool_stats,
+};
+
+static int ivshm_net_probe(struct pci_dev *pdev,
+                           const struct pci_device_id *id)
+{
+        struct net_device *ndev;
+        struct ivshm_net *in;
+        struct ivshmem_regs __iomem *regs;
+        resource_size_t shmaddr;
+        resource_size_t shmlen;
+        void *shm;
+        u32 ivpos;
+        int interrupt;
+        int err;
+
+        err = pcim_enable_device(pdev);
+        if (err) {
+                dev_err(&pdev->dev, "pcim_enable_device: %d\n", err);
+                return err;
+        }
+
+        err = pcim_iomap_regions(pdev, BIT(0), DRV_NAME);
+        if (err) {
+                dev_err(&pdev->dev, "pcim_iomap_regions: %d\n", err);
+                return err;
+        }
+
+        regs = pcim_iomap_table(pdev)[0];
+
+        shmlen = pci_resource_len(pdev, 2);
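+
+        /*
+         * If the shared memory region is not exposed through BAR 2,
+         * read its address and size from the Jailhouse-specific
+         * config space registers defined at the top of this file.
+         */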
+        if (shmlen) {
+                shmaddr = pci_resource_start(pdev, 2);
+        } else {
+                union { u64 v; u32 hl[2]; } val;
+
+                pci_read_config_dword(pdev, JAILHOUSE_CFG_SHMEM_PTR,
+                                      &val.hl[0]);
+                pci_read_config_dword(pdev, JAILHOUSE_CFG_SHMEM_PTR + 4,
+                                      &val.hl[1]);
+                shmaddr = val.v;
+
+                pci_read_config_dword(pdev, JAILHOUSE_CFG_SHMEM_SZ,
+                                      &val.hl[0]);
+                pci_read_config_dword(pdev, JAILHOUSE_CFG_SHMEM_SZ + 4,
+                                      &val.hl[1]);
+                shmlen = val.v;
+        }
+
+        if (!devm_request_mem_region(&pdev->dev, shmaddr, shmlen, DRV_NAME))
+                return -EBUSY;
+
+        shm = devm_memremap(&pdev->dev, shmaddr, shmlen, MEMREMAP_WC);
+        if (!shm)
+                return -ENOMEM;
+
+        ivpos = readl(&regs->ivpos);
+        if (ivpos > 1) {
+                dev_err(&pdev->dev, "invalid IVPosition %d\n", ivpos);
+                return -EINVAL;
+        }
+
+        dev_info(&pdev->dev, "shared memory size %pa\n", &shmlen);
+
+        ndev = alloc_etherdev(sizeof(*in));
+        if (!ndev)
+                return -ENOMEM;
+
+        pci_set_drvdata(pdev, ndev);
+        SET_NETDEV_DEV(ndev, &pdev->dev);
+
+        in = netdev_priv(ndev);
+        in->ivshm_regs = regs;
+        in->shm = shm;
+        in->shmaddr = shmaddr;
+        in->shmlen = shmlen;
+        in->peer_id = !ivpos;
+
+        spin_lock_init(&in->tx_free_lock);
+        spin_lock_init(&in->tx_clean_lock);
+
+        err = ivshm_net_calc_qsize(ndev);
+        if (err)
+                goto err_free;
+
+        in->state_wq = alloc_ordered_workqueue(DRV_NAME, 0);
+        if (!in->state_wq)
+                goto err_free;
+
+        INIT_WORK(&in->state_work, ivshm_net_state_change);
+
+        eth_random_addr(ndev->dev_addr);
+        ndev->netdev_ops = &ivshm_net_ops;
+        ndev->ethtool_ops = &ivshm_net_ethtool_ops;
+        ndev->mtu = min_t(u32, IVSHM_NET_MTU_DEF, in->qsize / 16);
+        ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG;
+        ndev->features = ndev->hw_features;
+
+        netif_carrier_off(ndev);
+        netif_napi_add(ndev, &in->napi, ivshm_net_poll, NAPI_POLL_WEIGHT);
+
+        err = register_netdev(ndev);
+        if (err)
+                goto err_wq;
+
+        err = pci_enable_msix(pdev, &in->msix, 1);
+        if (!err) {
+                interrupt = in->msix.vector;
+                in->using_msix = true;
+        } else {
+                interrupt = pdev->irq;
+                in->using_msix = false;
+        }
+
+        err = request_irq(interrupt, ivshm_net_int, 0, DRV_NAME, ndev);
+        if (err)
+                goto err_int;
+
+        pci_set_master(pdev);
+
+        writel(IVSHM_NET_STATE_RESET, &in->ivshm_regs->lstate);
+
+        return 0;
+
+err_int:
+        if (in->using_msix)
+                pci_disable_msix(pdev);
+        unregister_netdev(ndev);
+err_wq:
+        destroy_workqueue(in->state_wq);
+err_free:
+        free_netdev(ndev);
+
+        return err;
+}
+
+static void ivshm_net_remove(struct pci_dev *pdev)
+{
+        struct net_device *ndev = pci_get_drvdata(pdev);
+        struct ivshm_net *in = netdev_priv(ndev);
+
+        if (in->using_msix) {
+                free_irq(in->msix.vector, ndev);
+                pci_disable_msix(pdev);
+        } else {
+                free_irq(pdev->irq, ndev);
+        }
+
+        unregister_netdev(ndev);
+        cancel_work_sync(&in->state_work);
+        destroy_workqueue(in->state_wq);
+        free_netdev(ndev);
+}
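+
+/*
+ * Match the QEMU/Jailhouse inter-VM shared memory device (1af4:1110).
+ * The extra class match (base class 0xff, subclass 0x01) presumably
+ * tells Jailhouse-flavoured ivshmem devices apart from plain QEMU
+ * ivshmem ones, which this driver cannot handle.
+ */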
+static const struct pci_device_id ivshm_net_id_table[] = {
+        { PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, 0x1110),
+          (PCI_CLASS_OTHERS << 16) | (0x01 << 8), 0xffff00 },
+        { }
+};
+MODULE_DEVICE_TABLE(pci, ivshm_net_id_table);
+
+static struct pci_driver ivshm_net_driver = {
+        .name           = DRV_NAME,
+        .id_table       = ivshm_net_id_table,
+        .probe          = ivshm_net_probe,
+        .remove         = ivshm_net_remove,
+};
+module_pci_driver(ivshm_net_driver);
+
+MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>");
+MODULE_LICENSE("GPL");