/FreeBSD/usr.sbin/bhyve/virtio.c

/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 Chris Torek <torek @ torek net>
 * All rights reserved.
 * Copyright (c) 2019 Joyent, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/uio.h>

#include <machine/atomic.h>
#include <machine/vmm_snapshot.h>

#include <stdio.h>
#include <stdint.h>
#include <pthread.h>
#include <pthread_np.h>

#include "bhyverun.h"
#include "debug.h"
#include "pci_emul.h"
#include "virtio.h"

/*
 * Functions for dealing with generalized "virtual devices" as
 * defined by <https://www.google.com/#output=search&q=virtio+spec>
 */

/*
 * In case we decide to relax the "virtio softc comes at the
 * front of virtio-based device softc" constraint, let's use
 * this to convert.
 */
#define DEV_SOFTC(vs) ((void *)(vs))

/*
 * Link a virtio_softc to its constants, the device softc, and
 * the PCI emulation.
 */
void
vi_softc_linkup(struct virtio_softc *vs, struct virtio_consts *vc,
    void *dev_softc, struct pci_devinst *pi, struct vqueue_info *queues)
{
        int i;

        /* vs and dev_softc addresses must match */
        assert((void *)vs == dev_softc);
        vs->vs_vc = vc;
        vs->vs_pi = pi;
        pi->pi_arg = vs;

        vs->vs_queues = queues;
        for (i = 0; i < vc->vc_nvq; i++) {
                queues[i].vq_vs = vs;
                queues[i].vq_num = i;
        }
}
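
/*
 * For illustration, a device model embeds the virtio_softc as the
 * *first* member of its own softc (so the two addresses match, as
 * asserted above) and links the pieces together from its init routine.
 * A rough sketch; the "myvirt"/"mvs_*" names are hypothetical, not part
 * of this file:
 *
 *      struct myvirt_softc {
 *              struct virtio_softc mvs_vs;     -- must come first
 *              pthread_mutex_t mvs_mtx;
 *              struct vqueue_info mvs_vq;
 *              ... device-private state ...
 *      };
 *
 *      sc = calloc(1, sizeof(struct myvirt_softc));
 *      pthread_mutex_init(&sc->mvs_mtx, NULL);
 *      vi_softc_linkup(&sc->mvs_vs, &myvirt_vi_consts, sc, pi, &sc->mvs_vq);
 *      sc->mvs_vs.vs_mtx = &sc->mvs_mtx;
 */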

/*
 * Reset device (device-wide). This erases all queues, i.e.,
 * all the queues become invalid (though we don't wipe out the
 * internal pointers, we just clear the VQ_ALLOC flag).
 *
 * It resets negotiated features to "none".
 *
 * If MSI-X is enabled, this also resets all the vectors to NO_VECTOR.
 */
void
vi_reset_dev(struct virtio_softc *vs)
{
        struct vqueue_info *vq;
        int i, nvq;

        if (vs->vs_mtx)
                assert(pthread_mutex_isowned_np(vs->vs_mtx));

        nvq = vs->vs_vc->vc_nvq;
        for (vq = vs->vs_queues, i = 0; i < nvq; vq++, i++) {
                vq->vq_flags = 0;
                vq->vq_last_avail = 0;
                vq->vq_next_used = 0;
                vq->vq_save_used = 0;
                vq->vq_pfn = 0;
                vq->vq_msix_idx = VIRTIO_MSI_NO_VECTOR;
        }
        vs->vs_negotiated_caps = 0;
        vs->vs_curq = 0;
        /* vs->vs_status = 0; -- redundant */
        if (vs->vs_isr)
                pci_lintr_deassert(vs->vs_pi);
        vs->vs_isr = 0;
        vs->vs_msix_cfg_idx = VIRTIO_MSI_NO_VECTOR;
}
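
/*
 * For illustration, a device's vc_reset callback (invoked from
 * vi_pci_write() when the guest writes 0 to the STATUS register, with
 * the softc mutex already held) typically quiesces its own state and
 * then calls vi_reset_dev().  A hypothetical sketch:
 *
 *      static void
 *      myvirt_reset(void *vsc)
 *      {
 *              struct myvirt_softc *sc = vsc;
 *
 *              ... quiesce and clear device-private state ...
 *              vi_reset_dev(&sc->mvs_vs);
 *      }
 */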

/*
 * Set I/O BAR (usually 0) to map PCI config registers.
 */
void
vi_set_io_bar(struct virtio_softc *vs, int barnum)
{
        size_t size;

        /*
         * ??? should we use CFG0 if MSI-X is disabled?
         * Existing code did not...
         */
        size = VTCFG_R_CFG1 + vs->vs_vc->vc_cfgsize;
        pci_emul_alloc_bar(vs->vs_pi, barnum, PCIBAR_IO, size);
}

/*
 * Initialize MSI-X vector capabilities if we're to use MSI-X,
 * or MSI capabilities if not.
 *
 * We assume we want one MSI-X vector per queue, here, plus one
 * for the config vec.
 */
int
vi_intr_init(struct virtio_softc *vs, int barnum, int use_msix)
{
        int nvec;

        if (use_msix) {
                vs->vs_flags |= VIRTIO_USE_MSIX;
                VS_LOCK(vs);
                vi_reset_dev(vs); /* set all vectors to NO_VECTOR */
                VS_UNLOCK(vs);
                nvec = vs->vs_vc->vc_nvq + 1;
                if (pci_emul_add_msixcap(vs->vs_pi, nvec, barnum))
                        return (1);
        } else
                vs->vs_flags &= ~VIRTIO_USE_MSIX;

        /* Only 1 MSI vector for bhyve */
        pci_emul_add_msicap(vs->vs_pi, 1);

        /* Legacy interrupts are mandatory for virtio devices */
        pci_lintr_request(vs->vs_pi);

        return (0);
}
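
/*
 * For illustration, the usual initialization sequence in a device
 * model's init routine, after vi_softc_linkup(), is to set up
 * interrupts and then the register BAR.  A sketch only; barnum values
 * and the fbsdrun_virtio_msix() helper (declared in bhyverun.h) follow
 * the pattern used by the in-tree virtio device models:
 *
 *      if (vi_intr_init(&sc->mvs_vs, 1, fbsdrun_virtio_msix()))
 *              return (1);             -- adding the MSI-X cap failed
 *      vi_set_io_bar(&sc->mvs_vs, 0);  -- BAR 0 maps the virtio registers
 */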

/*
 * Initialize the currently-selected virtio queue (vs->vs_curq).
 * The guest just gave us a page frame number, from which we can
 * calculate the addresses of the queue.
 */
void
vi_vq_init(struct virtio_softc *vs, uint32_t pfn)
{
        struct vqueue_info *vq;
        uint64_t phys;
        size_t size;
        char *base;

        vq = &vs->vs_queues[vs->vs_curq];
        vq->vq_pfn = pfn;
        phys = (uint64_t)pfn << VRING_PFN;
        size = vring_size(vq->vq_qsize);
        base = paddr_guest2host(vs->vs_pi->pi_vmctx, phys, size);

        /* First page(s) are descriptors... */
        vq->vq_desc = (struct virtio_desc *)base;
        base += vq->vq_qsize * sizeof(struct virtio_desc);

        /* ... immediately followed by "avail" ring (entirely uint16_t's) */
        vq->vq_avail = (struct vring_avail *)base;
        base += (2 + vq->vq_qsize + 1) * sizeof(uint16_t);

        /* Then it's rounded up to the next page... */
        base = (char *)roundup2((uintptr_t)base, VRING_ALIGN);

        /* ... and the last page(s) are the used ring. */
        vq->vq_used = (struct vring_used *)base;

        /* Mark queue as allocated, and start at 0 when we use it. */
        vq->vq_flags = VQ_ALLOC;
        vq->vq_last_avail = 0;
        vq->vq_next_used = 0;
        vq->vq_save_used = 0;
}
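
/*
 * Worked example of the layout above (assuming the legacy 16-byte
 * struct virtio_desc and a VRING_ALIGN of 4096): with vq_qsize = 64,
 * the descriptor table occupies 64 * 16 = 1024 bytes and the avail
 * ring occupies (2 + 64 + 1) * 2 = 134 bytes (flags, idx, 64 ring
 * entries, used_event), for 1158 bytes total.  The used ring therefore
 * starts at the next 4096-byte boundary, i.e., offset 4096 from the
 * base of the ring.
 */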

/*
 * Helper inline for vq_getchain(): record the i'th "real"
 * descriptor.
 */
static inline void
_vq_record(int i, volatile struct virtio_desc *vd, struct vmctx *ctx,
    struct iovec *iov, int n_iov, uint16_t *flags) {

        if (i >= n_iov)
                return;
        iov[i].iov_base = paddr_guest2host(ctx, vd->vd_addr, vd->vd_len);
        iov[i].iov_len = vd->vd_len;
        if (flags != NULL)
                flags[i] = vd->vd_flags;
}
#define VQ_MAX_DESCRIPTORS 512 /* see below */

/*
 * Examine the chain of descriptors starting at the "next one" to
 * make sure that they describe a sensible request. If so, return
 * the number of "real" descriptors that would be needed/used in
 * acting on this request. This may be smaller than the number of
 * available descriptors, e.g., if there are two available but
 * they are two separate requests, this just returns 1. Or, it
 * may be larger: if there are indirect descriptors involved,
 * there may only be one descriptor available but it may be an
 * indirect pointing to eight more. We return 8 in this case,
 * i.e., we do not count the indirect descriptors, only the "real"
 * ones.
 *
 * Basically, this vets the vd_flags and vd_next field of each
 * descriptor and tells you how many are involved. Since some may
 * be indirect, this also needs the vmctx (in the pci_devinst
 * at vs->vs_pi) so that it can find indirect descriptors.
 *
 * As we process each descriptor, we copy and adjust it (guest to
 * host address wise, also using the vmctx) into the given iov[]
 * array (of the given size). If the array overflows, we stop
 * placing values into the array but keep processing descriptors,
 * up to VQ_MAX_DESCRIPTORS, before giving up and returning -1.
 * So you, the caller, must not assume that iov[] is as big as the
 * return value (you can process the same thing twice to allocate
 * a larger iov array if needed, or supply a zero length to find
 * out how much space is needed).
 *
 * If you want to verify the WRITE flag on each descriptor, pass a
 * non-NULL "flags" pointer to an array of "uint16_t" of the same size
 * as n_iov and we'll copy each vd_flags field after unwinding any
 * indirects.
 *
 * If some descriptor(s) are invalid, this prints a diagnostic message
 * and returns -1. If no descriptors are ready now it simply returns 0.
 *
 * You are assumed to have done a vq_ring_ready() if needed (note
 * that vq_has_descs() does one).
 */
int
vq_getchain(struct vqueue_info *vq, uint16_t *pidx,
    struct iovec *iov, int n_iov, uint16_t *flags)
{
        int i;
        u_int ndesc, n_indir;
        u_int idx, next;
        volatile struct virtio_desc *vdir, *vindir, *vp;
        struct vmctx *ctx;
        struct virtio_softc *vs;
        const char *name;

        vs = vq->vq_vs;
        name = vs->vs_vc->vc_name;

        /*
         * Note: it's the responsibility of the guest not to
         * update vq->vq_avail->va_idx until all of the descriptors
         * the guest has written are valid (including all their
         * vd_next fields and vd_flags).
         *
         * Compute (va_idx - last_avail) in integers mod 2**16. This is
         * the number of descriptors the device has made available
         * since the last time we updated vq->vq_last_avail.
         *
         * We just need to do the subtraction as an unsigned int,
         * then trim off excess bits.
         */
        idx = vq->vq_last_avail;
        ndesc = (uint16_t)((u_int)vq->vq_avail->va_idx - idx);
        if (ndesc == 0)
                return (0);
        if (ndesc > vq->vq_qsize) {
                /* XXX need better way to diagnose issues */
                EPRINTLN(
                    "%s: ndesc (%u) out of range, driver confused?",
                    name, (u_int)ndesc);
                return (-1);
        }

        /*
         * Now count/parse "involved" descriptors starting from
         * the head of the chain.
         *
         * To prevent loops, we could be more complicated and
         * check whether we're re-visiting a previously visited
         * index, but we just abort if the count gets excessive.
         */
        ctx = vs->vs_pi->pi_vmctx;
        *pidx = next = vq->vq_avail->va_ring[idx & (vq->vq_qsize - 1)];
        vq->vq_last_avail++;
        for (i = 0; i < VQ_MAX_DESCRIPTORS; next = vdir->vd_next) {
                if (next >= vq->vq_qsize) {
                        EPRINTLN(
                            "%s: descriptor index %u out of range, "
                            "driver confused?",
                            name, next);
                        return (-1);
                }
                vdir = &vq->vq_desc[next];
                if ((vdir->vd_flags & VRING_DESC_F_INDIRECT) == 0) {
                        _vq_record(i, vdir, ctx, iov, n_iov, flags);
                        i++;
                } else if ((vs->vs_vc->vc_hv_caps &
                    VIRTIO_RING_F_INDIRECT_DESC) == 0) {
                        EPRINTLN(
                            "%s: descriptor has forbidden INDIRECT flag, "
                            "driver confused?",
                            name);
                        return (-1);
                } else {
                        n_indir = vdir->vd_len / 16;
                        if ((vdir->vd_len & 0xf) || n_indir == 0) {
                                EPRINTLN(
                                    "%s: invalid indir len 0x%x, "
                                    "driver confused?",
                                    name, (u_int)vdir->vd_len);
                                return (-1);
                        }
                        vindir = paddr_guest2host(ctx,
                            vdir->vd_addr, vdir->vd_len);
                        /*
                         * Indirects start at the 0th, then follow
                         * their own embedded "next"s until those run
                         * out. Each one's indirect flag must be off
                         * (we don't really have to check, could just
                         * ignore errors...).
                         */
                        next = 0;
                        for (;;) {
                                vp = &vindir[next];
                                if (vp->vd_flags & VRING_DESC_F_INDIRECT) {
                                        EPRINTLN(
                                            "%s: indirect desc has INDIR flag,"
                                            " driver confused?",
                                            name);
                                        return (-1);
                                }
                                _vq_record(i, vp, ctx, iov, n_iov, flags);
                                if (++i > VQ_MAX_DESCRIPTORS)
                                        goto loopy;
                                if ((vp->vd_flags & VRING_DESC_F_NEXT) == 0)
                                        break;
                                next = vp->vd_next;
                                if (next >= n_indir) {
                                        EPRINTLN(
                                            "%s: invalid next %u > %u, "
                                            "driver confused?",
                                            name, (u_int)next, n_indir);
                                        return (-1);
                                }
                        }
                }
                if ((vdir->vd_flags & VRING_DESC_F_NEXT) == 0)
                        return (i);
        }
loopy:
        EPRINTLN(
            "%s: descriptor loop? count > %d - driver confused?",
            name, i);
        return (-1);
}
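
/*
 * For illustration, a device's queue-notify handler typically drains
 * the avail ring with a loop along these lines (a sketch only;
 * MYVIRT_MAXSEGS, the request handling step, and the computed iolen
 * are hypothetical):
 *
 *      static void
 *      myvirt_notify(void *vsc, struct vqueue_info *vq)
 *      {
 *              struct iovec iov[MYVIRT_MAXSEGS];
 *              uint16_t flags[MYVIRT_MAXSEGS], idx;
 *              uint32_t iolen;
 *              int n;
 *
 *              while (vq_has_descs(vq)) {
 *                      n = vq_getchain(vq, &idx, iov, MYVIRT_MAXSEGS, flags);
 *                      if (n <= 0)
 *                              break;
 *                      ... handle the request in iov[0..n-1], set iolen
 *                          to the number of bytes written back ...
 *                      vq_relchain(vq, idx, iolen);
 *              }
 *              vq_endchains(vq, 1);    -- interrupt the guest if needed
 *      }
 */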

/*
 * Return the first n_chains request chains back to the available queue.
 *
 * (These chains are the ones you handled when you called vq_getchain()
 * and used its positive return value.)
 */
void
vq_retchains(struct vqueue_info *vq, uint16_t n_chains)
{

        vq->vq_last_avail -= n_chains;
}

void
vq_relchain_prepare(struct vqueue_info *vq, uint16_t idx, uint32_t iolen)
{
        volatile struct vring_used *vuh;
        volatile struct virtio_used *vue;
        uint16_t mask;

        /*
         * Notes:
         *  - mask is N-1 where N is a power of 2 so computes x % N
         *  - vuh points to the "used" data shared with guest
         *  - vue points to the "used" ring entry we want to update
         *
         * (I apologize for the two fields named vu_idx; the
         * virtio spec calls the one that vue points to, "id"...)
         */
        mask = vq->vq_qsize - 1;
        vuh = vq->vq_used;

        vue = &vuh->vu_ring[vq->vq_next_used++ & mask];
        vue->vu_idx = idx;
        vue->vu_tlen = iolen;
}

void
vq_relchain_publish(struct vqueue_info *vq)
{
        /*
         * Ensure the used descriptor is visible before updating the index.
         * This is necessary on ISAs with memory ordering less strict than x86
         * (and even on x86 to act as a compiler barrier).
         */
        atomic_thread_fence_rel();
        vq->vq_used->vu_idx = vq->vq_next_used;
}

/*
 * Return specified request chain to the guest, setting its I/O length
 * to the provided value.
 *
 * (This chain is the one you handled when you called vq_getchain()
 * and used its positive return value.)
 */
void
vq_relchain(struct vqueue_info *vq, uint16_t idx, uint32_t iolen)
{
        vq_relchain_prepare(vq, idx, iolen);
        vq_relchain_publish(vq);
}
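
/*
 * For illustration, the prepare/publish split lets a device complete a
 * batch of chains with a single release fence and a single vu_idx
 * update, since vq_relchain_publish() stores whatever vq_next_used has
 * advanced to.  A sketch only:
 *
 *      for each completed request i:
 *              vq_relchain_prepare(vq, idx[i], iolen[i]);
 *      vq_relchain_publish(vq);        -- one fence + index store for all
 *      vq_endchains(vq, 0);
 */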

/*
 * Driver has finished processing "available" chains and calling
 * vq_relchain on each one. If driver used all the available
 * chains, used_all_avail should be set.
 *
 * If the "used" index moved we may need to inform the guest, i.e.,
 * deliver an interrupt. Even if the used index did NOT move we
 * may need to deliver an interrupt, if the avail ring is empty and
 * we are supposed to interrupt on empty.
 *
 * Note that used_all_avail is provided by the caller because it's
 * a snapshot of the ring state when the caller decided to finish
 * interrupt processing -- it's possible that descriptors became
 * available after that point. (It's also typically a constant 1/True
 * as well.)
 */
void
vq_endchains(struct vqueue_info *vq, int used_all_avail)
{
        struct virtio_softc *vs;
        uint16_t event_idx, new_idx, old_idx;
        int intr;

        /*
         * Interrupt generation: if we're using EVENT_IDX,
         * interrupt if we've crossed the event threshold.
         * Otherwise interrupt is generated if we added "used" entries,
         * but suppressed by VRING_AVAIL_F_NO_INTERRUPT.
         *
         * In any case, though, if NOTIFY_ON_EMPTY is set and the
         * entire avail was processed, we need to interrupt always.
         */
        vs = vq->vq_vs;
        old_idx = vq->vq_save_used;
        vq->vq_save_used = new_idx = vq->vq_used->vu_idx;

        /*
         * Use full memory barrier between vu_idx store from preceding
         * vq_relchain() call and the loads from VQ_USED_EVENT_IDX() or
         * va_flags below.
         */
        atomic_thread_fence_seq_cst();
        if (used_all_avail &&
            (vs->vs_negotiated_caps & VIRTIO_F_NOTIFY_ON_EMPTY))
                intr = 1;
        else if (vs->vs_negotiated_caps & VIRTIO_RING_F_EVENT_IDX) {
                event_idx = VQ_USED_EVENT_IDX(vq);
                /*
                 * This calculation is per docs and the kernel
                 * (see src/sys/dev/virtio/virtio_ring.h).
                 */
                intr = (uint16_t)(new_idx - event_idx - 1) <
                    (uint16_t)(new_idx - old_idx);
        } else {
                intr = new_idx != old_idx &&
                    !(vq->vq_avail->va_flags & VRING_AVAIL_F_NO_INTERRUPT);
        }
        if (intr)
                vq_interrupt(vs, vq);
}
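
/*
 * Worked example of the EVENT_IDX test above: with old_idx = 5,
 * new_idx = 7 and a guest-supplied event_idx of 6, the left side is
 * (uint16_t)(7 - 6 - 1) = 0 and the right side is (uint16_t)(7 - 5) = 2,
 * so 0 < 2 holds and we interrupt (the used index moved past the
 * guest's threshold).  If instead event_idx were 8, the left side wraps
 * to 0xfffe, the comparison fails, and no interrupt is sent.  The
 * unsigned 16-bit arithmetic keeps the test correct across index
 * wraparound.
 */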

/* Note: these are in sorted order to make for a fast search */
static struct config_reg {
        uint16_t   cr_offset;   /* register offset */
        uint8_t    cr_size;     /* size (bytes) */
        uint8_t    cr_ro;       /* true => reg is read only */
        const char *cr_name;    /* name of reg */
} config_regs[] = {
        { VTCFG_R_HOSTCAP,  4, 1, "HOSTCAP" },
        { VTCFG_R_GUESTCAP, 4, 0, "GUESTCAP" },
        { VTCFG_R_PFN,      4, 0, "PFN" },
        { VTCFG_R_QNUM,     2, 1, "QNUM" },
        { VTCFG_R_QSEL,     2, 0, "QSEL" },
        { VTCFG_R_QNOTIFY,  2, 0, "QNOTIFY" },
        { VTCFG_R_STATUS,   1, 0, "STATUS" },
        { VTCFG_R_ISR,      1, 0, "ISR" },
        { VTCFG_R_CFGVEC,   2, 0, "CFGVEC" },
        { VTCFG_R_QVEC,     2, 0, "QVEC" },
};

static inline struct config_reg *
vi_find_cr(int offset) {
        u_int hi, lo, mid;
        struct config_reg *cr;

        lo = 0;
        hi = sizeof(config_regs) / sizeof(*config_regs) - 1;
        while (hi >= lo) {
                mid = (hi + lo) >> 1;
                cr = &config_regs[mid];
                if (cr->cr_offset == offset)
                        return (cr);
                if (cr->cr_offset < offset)
                        lo = mid + 1;
                else
                        hi = mid - 1;
        }
        return (NULL);
}

/*
 * Handle pci config space reads.
 * If it's to the MSI-X info, do that.
 * If it's part of the virtio standard stuff, do that.
 * Otherwise dispatch to the actual driver.
 */
uint64_t
vi_pci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
    int baridx, uint64_t offset, int size)
{
        struct virtio_softc *vs = pi->pi_arg;
        struct virtio_consts *vc;
        struct config_reg *cr;
        uint64_t virtio_config_size, max;
        const char *name;
        uint32_t newoff;
        uint32_t value;
        int error;

        if (vs->vs_flags & VIRTIO_USE_MSIX) {
                if (baridx == pci_msix_table_bar(pi) ||
                    baridx == pci_msix_pba_bar(pi)) {
                        return (pci_emul_msix_tread(pi, offset, size));
                }
        }

        /* XXX probably should do something better than just assert() */
        assert(baridx == 0);

        if (vs->vs_mtx)
                pthread_mutex_lock(vs->vs_mtx);

        vc = vs->vs_vc;
        name = vc->vc_name;
        value = size == 1 ? 0xff : size == 2 ? 0xffff : 0xffffffff;

        if (size != 1 && size != 2 && size != 4)
                goto bad;

        if (pci_msix_enabled(pi))
                virtio_config_size = VTCFG_R_CFG1;
        else
                virtio_config_size = VTCFG_R_CFG0;

        if (offset >= virtio_config_size) {
                /*
                 * Subtract off the standard size (including MSI-X
                 * registers if enabled) and dispatch to underlying driver.
                 * If that fails, fall into general code.
                 */
                newoff = offset - virtio_config_size;
                max = vc->vc_cfgsize ? vc->vc_cfgsize : 0x100000000;
                if (newoff + size > max)
                        goto bad;
                error = (*vc->vc_cfgread)(DEV_SOFTC(vs), newoff, size, &value);
                if (!error)
                        goto done;
        }

bad:
        cr = vi_find_cr(offset);
        if (cr == NULL || cr->cr_size != size) {
                if (cr != NULL) {
                        /* offset must be OK, so size must be bad */
                        EPRINTLN(
                            "%s: read from %s: bad size %d",
                            name, cr->cr_name, size);
                } else {
                        EPRINTLN(
                            "%s: read from bad offset/size %jd/%d",
                            name, (uintmax_t)offset, size);
                }
                goto done;
        }

        switch (offset) {
        case VTCFG_R_HOSTCAP:
                value = vc->vc_hv_caps;
                break;
        case VTCFG_R_GUESTCAP:
                value = vs->vs_negotiated_caps;
                break;
        case VTCFG_R_PFN:
                if (vs->vs_curq < vc->vc_nvq)
                        value = vs->vs_queues[vs->vs_curq].vq_pfn;
                break;
        case VTCFG_R_QNUM:
                value = vs->vs_curq < vc->vc_nvq ?
                    vs->vs_queues[vs->vs_curq].vq_qsize : 0;
                break;
        case VTCFG_R_QSEL:
                value = vs->vs_curq;
                break;
        case VTCFG_R_QNOTIFY:
                value = 0; /* XXX */
                break;
        case VTCFG_R_STATUS:
                value = vs->vs_status;
                break;
        case VTCFG_R_ISR:
                value = vs->vs_isr;
                vs->vs_isr = 0; /* a read clears this flag */
                if (value)
                        pci_lintr_deassert(pi);
                break;
        case VTCFG_R_CFGVEC:
                value = vs->vs_msix_cfg_idx;
                break;
        case VTCFG_R_QVEC:
                value = vs->vs_curq < vc->vc_nvq ?
                    vs->vs_queues[vs->vs_curq].vq_msix_idx :
                    VIRTIO_MSI_NO_VECTOR;
                break;
        }
done:
        if (vs->vs_mtx)
                pthread_mutex_unlock(vs->vs_mtx);
        return (value);
}
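
/*
 * For illustration, a device's vc_cfgread callback receives an offset
 * relative to its device-specific config area and fills in up to four
 * bytes; vi_pci_read() has already bounds-checked offset and size
 * against vc_cfgsize.  A hypothetical device with a plain config
 * structure could implement it along these lines (sketch only; the
 * "myvirt"/"mvs_cfg" names are illustrative):
 *
 *      static int
 *      myvirt_cfgread(void *vsc, int offset, int size, uint32_t *retval)
 *      {
 *              struct myvirt_softc *sc = vsc;
 *              void *ptr;
 *
 *              ptr = (uint8_t *)&sc->mvs_cfg + offset;
 *              memcpy(retval, ptr, size);
 *              return (0);
 *      }
 */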

/*
 * Handle pci config space writes.
 * If it's to the MSI-X info, do that.
 * If it's part of the virtio standard stuff, do that.
 * Otherwise dispatch to the actual driver.
 */
void
vi_pci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
    int baridx, uint64_t offset, int size, uint64_t value)
{
        struct virtio_softc *vs = pi->pi_arg;
        struct vqueue_info *vq;
        struct virtio_consts *vc;
        struct config_reg *cr;
        uint64_t virtio_config_size, max;
        const char *name;
        uint32_t newoff;
        int error;

        if (vs->vs_flags & VIRTIO_USE_MSIX) {
                if (baridx == pci_msix_table_bar(pi) ||
                    baridx == pci_msix_pba_bar(pi)) {
                        pci_emul_msix_twrite(pi, offset, size, value);
                        return;
                }
        }

        /* XXX probably should do something better than just assert() */
        assert(baridx == 0);

        if (vs->vs_mtx)
                pthread_mutex_lock(vs->vs_mtx);

        vc = vs->vs_vc;
        name = vc->vc_name;

        if (size != 1 && size != 2 && size != 4)
                goto bad;

        if (pci_msix_enabled(pi))
                virtio_config_size = VTCFG_R_CFG1;
        else
                virtio_config_size = VTCFG_R_CFG0;

        if (offset >= virtio_config_size) {
                /*
                 * Subtract off the standard size (including MSI-X
                 * registers if enabled) and dispatch to underlying driver.
                 */
                newoff = offset - virtio_config_size;
                max = vc->vc_cfgsize ? vc->vc_cfgsize : 0x100000000;
                if (newoff + size > max)
                        goto bad;
                error = (*vc->vc_cfgwrite)(DEV_SOFTC(vs), newoff, size, value);
                if (!error)
                        goto done;
        }

bad:
        cr = vi_find_cr(offset);
        if (cr == NULL || cr->cr_size != size || cr->cr_ro) {
                if (cr != NULL) {
                        /* offset must be OK, wrong size and/or reg is R/O */
                        if (cr->cr_size != size)
                                EPRINTLN(
                                    "%s: write to %s: bad size %d",
                                    name, cr->cr_name, size);
                        if (cr->cr_ro)
                                EPRINTLN(
                                    "%s: write to read-only reg %s",
                                    name, cr->cr_name);
                } else {
                        EPRINTLN(
                            "%s: write to bad offset/size %jd/%d",
                            name, (uintmax_t)offset, size);
                }
                goto done;
        }

        switch (offset) {
        case VTCFG_R_GUESTCAP:
                vs->vs_negotiated_caps = value & vc->vc_hv_caps;
                if (vc->vc_apply_features)
                        (*vc->vc_apply_features)(DEV_SOFTC(vs),
                            vs->vs_negotiated_caps);
                break;
        case VTCFG_R_PFN:
                if (vs->vs_curq >= vc->vc_nvq)
                        goto bad_qindex;
                vi_vq_init(vs, value);
                break;
        case VTCFG_R_QSEL:
                /*
                 * Note that the guest is allowed to select an
                 * invalid queue; we just need to return a QNUM
                 * of 0 while the bad queue is selected.
                 */
                vs->vs_curq = value;
                break;
        case VTCFG_R_QNOTIFY:
                if (value >= vc->vc_nvq) {
                        EPRINTLN("%s: queue %d notify out of range",
                            name, (int)value);
                        goto done;
                }
                vq = &vs->vs_queues[value];
                if (vq->vq_notify)
                        (*vq->vq_notify)(DEV_SOFTC(vs), vq);
                else if (vc->vc_qnotify)
                        (*vc->vc_qnotify)(DEV_SOFTC(vs), vq);
                else
                        EPRINTLN(
                            "%s: qnotify queue %d: missing vq/vc notify",
                            name, (int)value);
                break;
        case VTCFG_R_STATUS:
                vs->vs_status = value;
                if (value == 0)
                        (*vc->vc_reset)(DEV_SOFTC(vs));
                break;
        case VTCFG_R_CFGVEC:
                vs->vs_msix_cfg_idx = value;
                break;
        case VTCFG_R_QVEC:
                if (vs->vs_curq >= vc->vc_nvq)
                        goto bad_qindex;
                vq = &vs->vs_queues[vs->vs_curq];
                vq->vq_msix_idx = value;
                break;
        }
        goto done;

bad_qindex:
        EPRINTLN(
            "%s: write config reg %s: curq %d >= max %d",
            name, cr->cr_name, vs->vs_curq, vc->vc_nvq);
done:
        if (vs->vs_mtx)
                pthread_mutex_unlock(vs->vs_mtx);
}

#ifdef BHYVE_SNAPSHOT
int
vi_pci_pause(struct vmctx *ctx, struct pci_devinst *pi)
{
        struct virtio_softc *vs;
        struct virtio_consts *vc;

        vs = pi->pi_arg;
        vc = vs->vs_vc;

        assert(vc->vc_pause != NULL);
        (*vc->vc_pause)(DEV_SOFTC(vs));

        return (0);
}

int
vi_pci_resume(struct vmctx *ctx, struct pci_devinst *pi)
{
        struct virtio_softc *vs;
        struct virtio_consts *vc;

        vs = pi->pi_arg;
        vc = vs->vs_vc;

        assert(vc->vc_resume != NULL);
        (*vc->vc_resume)(DEV_SOFTC(vs));

        return (0);
}

static int
vi_pci_snapshot_softc(struct virtio_softc *vs, struct vm_snapshot_meta *meta)
{
        int ret;

        SNAPSHOT_VAR_OR_LEAVE(vs->vs_flags, meta, ret, done);
        SNAPSHOT_VAR_OR_LEAVE(vs->vs_negotiated_caps, meta, ret, done);
        SNAPSHOT_VAR_OR_LEAVE(vs->vs_curq, meta, ret, done);
        SNAPSHOT_VAR_OR_LEAVE(vs->vs_status, meta, ret, done);
        SNAPSHOT_VAR_OR_LEAVE(vs->vs_isr, meta, ret, done);
        SNAPSHOT_VAR_OR_LEAVE(vs->vs_msix_cfg_idx, meta, ret, done);

done:
        return (ret);
}

static int
vi_pci_snapshot_consts(struct virtio_consts *vc, struct vm_snapshot_meta *meta)
{
        int ret;

        SNAPSHOT_VAR_CMP_OR_LEAVE(vc->vc_nvq, meta, ret, done);
        SNAPSHOT_VAR_CMP_OR_LEAVE(vc->vc_cfgsize, meta, ret, done);
        SNAPSHOT_VAR_CMP_OR_LEAVE(vc->vc_hv_caps, meta, ret, done);

done:
        return (ret);
}

static int
vi_pci_snapshot_queues(struct virtio_softc *vs, struct vm_snapshot_meta *meta)
{
        int i;
        int ret;
        struct virtio_consts *vc;
        struct vqueue_info *vq;
        uint64_t addr_size;

        vc = vs->vs_vc;

        /* Save virtio queue info */
        for (i = 0; i < vc->vc_nvq; i++) {
                vq = &vs->vs_queues[i];

                SNAPSHOT_VAR_CMP_OR_LEAVE(vq->vq_qsize, meta, ret, done);
                SNAPSHOT_VAR_CMP_OR_LEAVE(vq->vq_num, meta, ret, done);

                SNAPSHOT_VAR_OR_LEAVE(vq->vq_flags, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vq->vq_last_avail, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vq->vq_next_used, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vq->vq_save_used, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vq->vq_msix_idx, meta, ret, done);

                SNAPSHOT_VAR_OR_LEAVE(vq->vq_pfn, meta, ret, done);

                addr_size = vq->vq_qsize * sizeof(struct virtio_desc);
                SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(vq->vq_desc, addr_size,
                    false, meta, ret, done);

                addr_size = (2 + vq->vq_qsize + 1) * sizeof(uint16_t);
                SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(vq->vq_avail, addr_size,
                    false, meta, ret, done);

                addr_size = (2 + 2 * vq->vq_qsize + 1) * sizeof(uint16_t);
                SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(vq->vq_used, addr_size,
                    false, meta, ret, done);

                SNAPSHOT_BUF_OR_LEAVE(vq->vq_desc, vring_size(vq->vq_qsize),
                    meta, ret, done);
        }

done:
        return (ret);
}

int
vi_pci_snapshot(struct vm_snapshot_meta *meta)
{
        int ret;
        struct pci_devinst *pi;
        struct virtio_softc *vs;
        struct virtio_consts *vc;

        pi = meta->dev_data;
        vs = pi->pi_arg;
        vc = vs->vs_vc;

        /* Save virtio softc */
        ret = vi_pci_snapshot_softc(vs, meta);
        if (ret != 0)
                goto done;

        /* Save virtio consts */
        ret = vi_pci_snapshot_consts(vc, meta);
        if (ret != 0)
                goto done;

        /* Save virtio queue info */
        ret = vi_pci_snapshot_queues(vs, meta);
        if (ret != 0)
                goto done;

        /* Save device softc, if needed */
        if (vc->vc_snapshot != NULL) {
                ret = (*vc->vc_snapshot)(DEV_SOFTC(vs), meta);
                if (ret != 0)
                        goto done;
        }

done:
        return (ret);
}
#endif
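
/*
 * For illustration, a device model ties the callbacks used throughout
 * this file together in a struct virtio_consts.  A hypothetical device
 * might fill it in roughly as follows (designated initializers; all
 * names other than the vc_* field names are illustrative).  vc_qnotify
 * is the device-wide notify handler used when a queue has no per-queue
 * vq_notify; the snapshot-related hooks (vc_pause, vc_resume,
 * vc_snapshot) only exist when built with BHYVE_SNAPSHOT.
 *
 *      static struct virtio_consts myvirt_vi_consts = {
 *              .vc_name           = "myvirt",
 *              .vc_nvq            = 1,
 *              .vc_cfgsize        = sizeof(struct myvirt_config),
 *              .vc_reset          = myvirt_reset,
 *              .vc_qnotify        = myvirt_notify,
 *              .vc_cfgread        = myvirt_cfgread,
 *              .vc_cfgwrite       = myvirt_cfgwrite,
 *              .vc_apply_features = NULL,
 *              .vc_hv_caps        = MYVIRT_S_HOSTCAPS,
 *      };
 */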
