/FreeBSD/sys/geom/geom_subr.c

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/devicestat.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/errno.h>
#include <sys/sbuf.h>
#include <sys/sdt.h>
#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include <geom/geom_int.h>
#include <machine/stdarg.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef KDB
#include <sys/kdb.h>
#endif

SDT_PROVIDER_DEFINE(geom);

struct class_list_head g_classes = LIST_HEAD_INITIALIZER(g_classes);
static struct g_tailq_head geoms = TAILQ_HEAD_INITIALIZER(geoms);
char *g_wait_event, *g_wait_up, *g_wait_down, *g_wait_sim;

struct g_hh00 {
    struct g_class *mp;
    struct g_provider *pp;
    off_t size;
    int error;
    int post;
};

void
g_dbg_printf(const char *classname, int lvl, struct bio *bp,
    const char *format, ...)
{
#ifndef PRINTF_BUFR_SIZE
#define PRINTF_BUFR_SIZE 64
#endif
    char bufr[PRINTF_BUFR_SIZE];
    struct sbuf sb, *sbp __unused;
    va_list ap;

    sbp = sbuf_new(&sb, bufr, sizeof(bufr), SBUF_FIXEDLEN);
    KASSERT(sbp != NULL, ("sbuf_new misused?"));

    sbuf_set_drain(&sb, sbuf_printf_drain, NULL);

    sbuf_cat(&sb, classname);
    if (lvl >= 0)
        sbuf_printf(&sb, "[%d]", lvl);

    va_start(ap, format);
    sbuf_vprintf(&sb, format, ap);
    va_end(ap);

    if (bp != NULL) {
        sbuf_putc(&sb, ' ');
        g_format_bio(&sb, bp);
    }

    /* Terminate the debug line with a single '\n'. */
    sbuf_nl_terminate(&sb);

    /* Flush line to printf. */
    sbuf_finish(&sb);
    sbuf_delete(&sb);
}
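
/*
 * Usage sketch (illustrative only; the macro and variable names are
 * hypothetical): classes normally reach g_dbg_printf() through a
 * per-class wrapper built on the _GEOM_DEBUG() helper declared next to
 * it in <geom/geom_dbg.h>, gated on a per-class sysctl debug level:
 *
 *     #define G_EXAMPLE_DEBUG(lvl, ...) \
 *         _GEOM_DEBUG("GEOM_EXAMPLE", g_example_debug, (lvl), NULL, \
 *             __VA_ARGS__)
 */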

/*
 * This event offers a new class a chance to taste all preexisting providers.
 */
static void
g_load_class(void *arg, int flag)
{
    struct g_hh00 *hh;
    struct g_class *mp2, *mp;
    struct g_geom *gp;
    struct g_provider *pp;

    g_topology_assert();
    if (flag == EV_CANCEL)	/* XXX: can't happen ? */
        return;
    if (g_shutdown)
        return;

    hh = arg;
    mp = hh->mp;
    hh->error = 0;
    if (hh->post) {
        g_free(hh);
        hh = NULL;
    }
    g_trace(G_T_TOPOLOGY, "g_load_class(%s)", mp->name);
    KASSERT(mp->name != NULL && *mp->name != '\0',
        ("GEOM class has no name"));
    LIST_FOREACH(mp2, &g_classes, class) {
        if (mp2 == mp) {
            printf("The GEOM class %s is already loaded.\n",
                mp2->name);
            if (hh != NULL)
                hh->error = EEXIST;
            return;
        } else if (strcmp(mp2->name, mp->name) == 0) {
            printf("A GEOM class %s is already loaded.\n",
                mp2->name);
            if (hh != NULL)
                hh->error = EEXIST;
            return;
        }
    }

    LIST_INIT(&mp->geom);
    LIST_INSERT_HEAD(&g_classes, mp, class);
    if (mp->init != NULL)
        mp->init(mp);
    if (mp->taste == NULL)
        return;
    LIST_FOREACH(mp2, &g_classes, class) {
        if (mp == mp2)
            continue;
        LIST_FOREACH(gp, &mp2->geom, geom) {
            LIST_FOREACH(pp, &gp->provider, provider) {
                mp->taste(mp, pp, 0);
                g_topology_assert();
            }
        }
    }
}

static int
g_unload_class(struct g_class *mp)
{
    struct g_geom *gp;
    struct g_provider *pp;
    struct g_consumer *cp;
    int error;

    g_topology_lock();
    g_trace(G_T_TOPOLOGY, "g_unload_class(%s)", mp->name);
retry:
    G_VALID_CLASS(mp);
    LIST_FOREACH(gp, &mp->geom, geom) {
        /* We refuse to unload if anything is open */
        LIST_FOREACH(pp, &gp->provider, provider)
            if (pp->acr || pp->acw || pp->ace) {
                g_topology_unlock();
                return (EBUSY);
            }
        LIST_FOREACH(cp, &gp->consumer, consumer)
            if (cp->acr || cp->acw || cp->ace) {
                g_topology_unlock();
                return (EBUSY);
            }
        /* If the geom is withering, wait for it to finish. */
        if (gp->flags & G_GEOM_WITHER) {
            g_topology_sleep(mp, 1);
            goto retry;
        }
    }

    /*
     * We allow unloading if we have no geoms, or a class
     * method we can use to get rid of them.
     */
    if (!LIST_EMPTY(&mp->geom) && mp->destroy_geom == NULL) {
        g_topology_unlock();
        return (EOPNOTSUPP);
    }

    /* Bar new entries */
    mp->taste = NULL;
    mp->config = NULL;

    LIST_FOREACH(gp, &mp->geom, geom) {
        error = mp->destroy_geom(NULL, mp, gp);
        if (error != 0) {
            g_topology_unlock();
            return (error);
        }
    }
    /* Wait for withering to finish. */
    for (;;) {
        gp = LIST_FIRST(&mp->geom);
        if (gp == NULL)
            break;
        KASSERT(gp->flags & G_GEOM_WITHER,
            ("Non-withering geom in class %s", mp->name));
        g_topology_sleep(mp, 1);
    }
    G_VALID_CLASS(mp);
    if (mp->fini != NULL)
        mp->fini(mp);
    LIST_REMOVE(mp, class);
    g_topology_unlock();

    return (0);
}

int
g_modevent(module_t mod, int type, void *data)
{
    struct g_hh00 *hh;
    int error;
    static int g_ignition;
    struct g_class *mp;

    mp = data;
    if (mp->version != G_VERSION) {
        printf("GEOM class %s has wrong version %x\n",
            mp->name, mp->version);
        return (EINVAL);
    }
    if (!g_ignition) {
        g_ignition++;
        g_init();
    }
    error = EOPNOTSUPP;
    switch (type) {
    case MOD_LOAD:
        g_trace(G_T_TOPOLOGY, "g_modevent(%s, LOAD)", mp->name);
        hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO);
        hh->mp = mp;
        /*
         * Once the system is not cold, MOD_LOAD calls will be
         * from the userland and the g_event thread will be able
         * to acknowledge their completion.
         */
        if (cold) {
            hh->post = 1;
            error = g_post_event(g_load_class, hh, M_WAITOK, NULL);
        } else {
            error = g_waitfor_event(g_load_class, hh, M_WAITOK,
                NULL);
            if (error == 0)
                error = hh->error;
            g_free(hh);
        }
        break;
    case MOD_UNLOAD:
        g_trace(G_T_TOPOLOGY, "g_modevent(%s, UNLOAD)", mp->name);
        error = g_unload_class(mp);
        if (error == 0) {
            KASSERT(LIST_EMPTY(&mp->geom),
                ("Unloaded class (%s) still has geom", mp->name));
        }
        break;
    }
    return (error);
}
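
/*
 * Registration sketch (hypothetical class "example"; not part of this
 * file): classes do not call g_modevent() directly. It is installed as
 * the module event handler by DECLARE_GEOM_CLASS() from <geom/geom.h>:
 *
 *     static struct g_class g_example_class = {
 *         .name = "EXAMPLE",
 *         .version = G_VERSION,
 *         .taste = g_example_taste,
 *     };
 *     DECLARE_GEOM_CLASS(g_example_class, g_example);
 */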

static void
g_retaste_event(void *arg, int flag)
{
    struct g_class *mp, *mp2;
    struct g_geom *gp;
    struct g_hh00 *hh;
    struct g_provider *pp;
    struct g_consumer *cp;

    g_topology_assert();
    if (flag == EV_CANCEL)	/* XXX: can't happen ? */
        return;
    if (g_shutdown || g_notaste)
        return;

    hh = arg;
    mp = hh->mp;
    hh->error = 0;
    if (hh->post) {
        g_free(hh);
        hh = NULL;
    }
    g_trace(G_T_TOPOLOGY, "g_retaste(%s)", mp->name);

    LIST_FOREACH(mp2, &g_classes, class) {
        LIST_FOREACH(gp, &mp2->geom, geom) {
            LIST_FOREACH(pp, &gp->provider, provider) {
                if (pp->acr || pp->acw || pp->ace)
                    continue;
                LIST_FOREACH(cp, &pp->consumers, consumers) {
                    if (cp->geom->class == mp &&
                        (cp->flags & G_CF_ORPHAN) == 0)
                        break;
                }
                if (cp != NULL) {
                    cp->flags |= G_CF_ORPHAN;
                    g_wither_geom(cp->geom, ENXIO);
                }
                mp->taste(mp, pp, 0);
                g_topology_assert();
            }
        }
    }
}

int
g_retaste(struct g_class *mp)
{
    struct g_hh00 *hh;
    int error;

    if (mp->taste == NULL)
        return (EINVAL);

    hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO);
    hh->mp = mp;

    if (cold) {
        hh->post = 1;
        error = g_post_event(g_retaste_event, hh, M_WAITOK, NULL);
    } else {
        error = g_waitfor_event(g_retaste_event, hh, M_WAITOK, NULL);
        if (error == 0)
            error = hh->error;
        g_free(hh);
    }

    return (error);
}
struct g_geom *
g_new_geomf(struct g_class *mp, const char *fmt, ...)
{
    struct g_geom *gp;
    va_list ap;
    struct sbuf *sb;

    g_topology_assert();
    G_VALID_CLASS(mp);
    sb = sbuf_new_auto();
    va_start(ap, fmt);
    sbuf_vprintf(sb, fmt, ap);
    va_end(ap);
    sbuf_finish(sb);
    gp = g_malloc(sizeof *gp, M_WAITOK | M_ZERO);
    gp->name = g_malloc(sbuf_len(sb) + 1, M_WAITOK | M_ZERO);
    gp->class = mp;
    gp->rank = 1;
    LIST_INIT(&gp->consumer);
    LIST_INIT(&gp->provider);
    LIST_INSERT_HEAD(&mp->geom, gp, geom);
    TAILQ_INSERT_HEAD(&geoms, gp, geoms);
    strcpy(gp->name, sbuf_data(sb));
    sbuf_delete(sb);
    /* Fill in defaults from class */
    gp->start = mp->start;
    gp->spoiled = mp->spoiled;
    gp->attrchanged = mp->attrchanged;
    gp->providergone = mp->providergone;
    gp->dumpconf = mp->dumpconf;
    gp->access = mp->access;
    gp->orphan = mp->orphan;
    gp->ioctl = mp->ioctl;
    gp->resize = mp->resize;
    return (gp);
}

void
g_destroy_geom(struct g_geom *gp)
{

    g_topology_assert();
    G_VALID_GEOM(gp);
    g_trace(G_T_TOPOLOGY, "g_destroy_geom(%p(%s))", gp, gp->name);
    KASSERT(LIST_EMPTY(&gp->consumer),
        ("g_destroy_geom(%s) with consumer(s) [%p]",
        gp->name, LIST_FIRST(&gp->consumer)));
    KASSERT(LIST_EMPTY(&gp->provider),
        ("g_destroy_geom(%s) with provider(s) [%p]",
        gp->name, LIST_FIRST(&gp->provider)));
    g_cancel_event(gp);
    LIST_REMOVE(gp, geom);
    TAILQ_REMOVE(&geoms, gp, geoms);
    g_free(gp->name);
    g_free(gp);
}

/*
 * This function is called (repeatedly) until the geom has withered away.
 */
void
g_wither_geom(struct g_geom *gp, int error)
{
    struct g_provider *pp;

    g_topology_assert();
    G_VALID_GEOM(gp);
    g_trace(G_T_TOPOLOGY, "g_wither_geom(%p(%s))", gp, gp->name);
    if (!(gp->flags & G_GEOM_WITHER)) {
        gp->flags |= G_GEOM_WITHER;
        LIST_FOREACH(pp, &gp->provider, provider)
            if (!(pp->flags & G_PF_ORPHAN))
                g_orphan_provider(pp, error);
    }
    g_do_wither();
}

/*
 * Convenience function to destroy a particular provider.
 */
void
g_wither_provider(struct g_provider *pp, int error)
{

    pp->flags |= G_PF_WITHER;
    if (!(pp->flags & G_PF_ORPHAN))
        g_orphan_provider(pp, error);
}

/*
 * This function is called (repeatedly) until the geom has withered away.
 */
void
g_wither_geom_close(struct g_geom *gp, int error)
{
    struct g_consumer *cp;

    g_topology_assert();
    G_VALID_GEOM(gp);
    g_trace(G_T_TOPOLOGY, "g_wither_geom_close(%p(%s))", gp, gp->name);
    LIST_FOREACH(cp, &gp->consumer, consumer)
        if (cp->acr || cp->acw || cp->ace)
            g_access(cp, -cp->acr, -cp->acw, -cp->ace);
    g_wither_geom(gp, error);
}

/*
 * This function is called (repeatedly) until we can't wash away more
 * withered bits at present.
 */
void
g_wither_washer(void)
{
    struct g_class *mp;
    struct g_geom *gp, *gp2;
    struct g_provider *pp, *pp2;
    struct g_consumer *cp, *cp2;

    g_topology_assert();
    LIST_FOREACH(mp, &g_classes, class) {
        LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
            LIST_FOREACH_SAFE(pp, &gp->provider, provider, pp2) {
                if (!(pp->flags & G_PF_WITHER))
                    continue;
                if (LIST_EMPTY(&pp->consumers))
                    g_destroy_provider(pp);
            }
            if (!(gp->flags & G_GEOM_WITHER))
                continue;
            LIST_FOREACH_SAFE(pp, &gp->provider, provider, pp2) {
                if (LIST_EMPTY(&pp->consumers))
                    g_destroy_provider(pp);
            }
            LIST_FOREACH_SAFE(cp, &gp->consumer, consumer, cp2) {
                if (cp->acr || cp->acw || cp->ace)
                    continue;
                if (cp->provider != NULL)
                    g_detach(cp);
                g_destroy_consumer(cp);
            }
            if (LIST_EMPTY(&gp->provider) &&
                LIST_EMPTY(&gp->consumer))
                g_destroy_geom(gp);
        }
    }
}

struct g_consumer *
g_new_consumer(struct g_geom *gp)
{
    struct g_consumer *cp;

    g_topology_assert();
    G_VALID_GEOM(gp);
    KASSERT(!(gp->flags & G_GEOM_WITHER),
        ("g_new_consumer on WITHERing geom(%s) (class %s)",
        gp->name, gp->class->name));
    KASSERT(gp->orphan != NULL,
        ("g_new_consumer on geom(%s) (class %s) without orphan",
        gp->name, gp->class->name));

    cp = g_malloc(sizeof *cp, M_WAITOK | M_ZERO);
    cp->geom = gp;
    cp->stat = devstat_new_entry(cp, -1, 0, DEVSTAT_ALL_SUPPORTED,
        DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
    LIST_INSERT_HEAD(&gp->consumer, cp, consumer);
    return (cp);
}

void
g_destroy_consumer(struct g_consumer *cp)
{
    struct g_geom *gp;

    g_topology_assert();
    G_VALID_CONSUMER(cp);
    g_trace(G_T_TOPOLOGY, "g_destroy_consumer(%p)", cp);
    KASSERT(cp->provider == NULL, ("g_destroy_consumer but attached"));
    KASSERT(cp->acr == 0, ("g_destroy_consumer with acr"));
    KASSERT(cp->acw == 0, ("g_destroy_consumer with acw"));
    KASSERT(cp->ace == 0, ("g_destroy_consumer with ace"));
    g_cancel_event(cp);
    gp = cp->geom;
    LIST_REMOVE(cp, consumer);
    devstat_remove_entry(cp->stat);
    g_free(cp);
    if (gp->flags & G_GEOM_WITHER)
        g_do_wither();
}

static void
g_new_provider_event(void *arg, int flag)
{
    struct g_class *mp;
    struct g_provider *pp;
    struct g_consumer *cp, *next_cp;

    g_topology_assert();
    if (flag == EV_CANCEL)
        return;
    if (g_shutdown)
        return;
    pp = arg;
    G_VALID_PROVIDER(pp);
    KASSERT(!(pp->flags & G_PF_WITHER),
        ("g_new_provider_event but withered"));
    LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, next_cp) {
        if ((cp->flags & G_CF_ORPHAN) == 0 &&
            cp->geom->attrchanged != NULL)
            cp->geom->attrchanged(cp, "GEOM::media");
    }
    if (g_notaste)
        return;
    LIST_FOREACH(mp, &g_classes, class) {
        if (mp->taste == NULL)
            continue;
        LIST_FOREACH(cp, &pp->consumers, consumers)
            if (cp->geom->class == mp &&
                (cp->flags & G_CF_ORPHAN) == 0)
                break;
        if (cp != NULL)
            continue;
        mp->taste(mp, pp, 0);
        g_topology_assert();
    }
}

struct g_provider *
g_new_providerf(struct g_geom *gp, const char *fmt, ...)
{
    struct g_provider *pp;
    struct sbuf *sb;
    va_list ap;

    g_topology_assert();
    G_VALID_GEOM(gp);
    KASSERT(gp->access != NULL,
        ("new provider on geom(%s) without ->access (class %s)",
        gp->name, gp->class->name));
    KASSERT(gp->start != NULL,
        ("new provider on geom(%s) without ->start (class %s)",
        gp->name, gp->class->name));
    KASSERT(!(gp->flags & G_GEOM_WITHER),
        ("new provider on WITHERing geom(%s) (class %s)",
        gp->name, gp->class->name));
    sb = sbuf_new_auto();
    va_start(ap, fmt);
    sbuf_vprintf(sb, fmt, ap);
    va_end(ap);
    sbuf_finish(sb);
    pp = g_malloc(sizeof *pp + sbuf_len(sb) + 1, M_WAITOK | M_ZERO);
    pp->name = (char *)(pp + 1);
    strcpy(pp->name, sbuf_data(sb));
    sbuf_delete(sb);
    LIST_INIT(&pp->consumers);
    LIST_INIT(&pp->aliases);
    pp->error = ENXIO;
    pp->geom = gp;
    pp->stat = devstat_new_entry(pp, -1, 0, DEVSTAT_ALL_SUPPORTED,
        DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
    LIST_INSERT_HEAD(&gp->provider, pp, provider);
    g_post_event(g_new_provider_event, pp, M_WAITOK, pp, gp, NULL);
    return (pp);
}
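
/*
 * Creation sketch (hypothetical pass-through class; illustrative only):
 * a taste or create method typically builds the geom, attaches a
 * consumer to the provider being tasted, publishes its own provider,
 * and clears the initial ENXIO set above via g_error_provider():
 *
 *     gp = g_new_geomf(mp, "%s.example", pp->name);
 *     cp = g_new_consumer(gp);
 *     if (g_attach(cp, pp) == 0) {
 *         newpp = g_new_providerf(gp, "%s.example", pp->name);
 *         newpp->mediasize = pp->mediasize;
 *         newpp->sectorsize = pp->sectorsize;
 *         g_error_provider(newpp, 0);
 *     }
 */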

void
g_provider_add_alias(struct g_provider *pp, const char *fmt, ...)
{
    struct sbuf *sb;
    struct g_geom_alias *gap;
    va_list ap;

    /*
     * Generate the alias string and save it in the list.
     */
    sb = sbuf_new_auto();
    va_start(ap, fmt);
    sbuf_vprintf(sb, fmt, ap);
    va_end(ap);
    sbuf_finish(sb);

    LIST_FOREACH(gap, &pp->aliases, ga_next) {
        if (strcmp(gap->ga_alias, sbuf_data(sb)) != 0)
            continue;
        /* Don't re-add the same alias. */
        sbuf_delete(sb);
        return;
    }

    gap = g_malloc(sizeof(*gap) + sbuf_len(sb) + 1, M_WAITOK | M_ZERO);
    memcpy((char *)(gap + 1), sbuf_data(sb), sbuf_len(sb));
    sbuf_delete(sb);
    gap->ga_alias = (const char *)(gap + 1);
    LIST_INSERT_HEAD(&pp->aliases, gap, ga_next);
}

void
g_error_provider(struct g_provider *pp, int error)
{

    /* G_VALID_PROVIDER(pp);  We may not have g_topology */
    pp->error = error;
}

static void
g_resize_provider_event(void *arg, int flag)
{
    struct g_hh00 *hh;
    struct g_class *mp;
    struct g_geom *gp;
    struct g_provider *pp;
    struct g_consumer *cp, *cp2;
    off_t size;

    g_topology_assert();
    if (g_shutdown)
        return;

    hh = arg;
    pp = hh->pp;
    size = hh->size;
    g_free(hh);

    G_VALID_PROVIDER(pp);
    KASSERT(!(pp->flags & G_PF_WITHER),
        ("g_resize_provider_event but withered"));
    g_trace(G_T_TOPOLOGY, "g_resize_provider_event(%p)", pp);

    LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) {
        gp = cp->geom;
        if (gp->resize == NULL && size < pp->mediasize) {
            /*
             * XXX: The g_dev_orphan method does deferred
             * destruction, and it is possible that another
             * event has already called the orphan method.
             * Check the consumer's flags so we do not
             * schedule it twice.
             */
            if (cp->flags & G_CF_ORPHAN)
                continue;
            cp->flags |= G_CF_ORPHAN;
            cp->geom->orphan(cp);
        }
    }

    pp->mediasize = size;

    LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) {
        gp = cp->geom;
        if ((gp->flags & G_GEOM_WITHER) == 0 && gp->resize != NULL)
            gp->resize(cp);
    }

    /*
     * After resizing, the previously invalid GEOM class metadata
     * might become valid.  This means we should retaste.
     */
    LIST_FOREACH(mp, &g_classes, class) {
        if (mp->taste == NULL)
            continue;
        LIST_FOREACH(cp, &pp->consumers, consumers)
            if (cp->geom->class == mp &&
                (cp->flags & G_CF_ORPHAN) == 0)
                break;
        if (cp != NULL)
            continue;
        mp->taste(mp, pp, 0);
        g_topology_assert();
    }
}

void
g_resize_provider(struct g_provider *pp, off_t size)
{
    struct g_hh00 *hh;

    G_VALID_PROVIDER(pp);
    if (pp->flags & G_PF_WITHER)
        return;

    if (size == pp->mediasize)
        return;

    hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO);
    hh->pp = pp;
    hh->size = size;
    g_post_event(g_resize_provider_event, hh, M_WAITOK, NULL);
}

struct g_provider *
g_provider_by_name(char const *arg)
{
    struct g_class *cp;
    struct g_geom *gp;
    struct g_provider *pp, *wpp;

    if (strncmp(arg, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
        arg += sizeof(_PATH_DEV) - 1;

    wpp = NULL;
    LIST_FOREACH(cp, &g_classes, class) {
        LIST_FOREACH(gp, &cp->geom, geom) {
            LIST_FOREACH(pp, &gp->provider, provider) {
                if (strcmp(arg, pp->name) != 0)
                    continue;
                if ((gp->flags & G_GEOM_WITHER) == 0 &&
                    (pp->flags & G_PF_WITHER) == 0)
                    return (pp);
                else
                    wpp = pp;
            }
        }
    }

    return (wpp);
}

void
g_destroy_provider(struct g_provider *pp)
{
    struct g_geom *gp;
    struct g_geom_alias *gap, *gaptmp;

    g_topology_assert();
    G_VALID_PROVIDER(pp);
    KASSERT(LIST_EMPTY(&pp->consumers),
        ("g_destroy_provider but attached"));
    KASSERT(pp->acr == 0, ("g_destroy_provider with acr"));
    KASSERT(pp->acw == 0, ("g_destroy_provider with acw"));
    KASSERT(pp->ace == 0, ("g_destroy_provider with ace"));
    g_cancel_event(pp);
    LIST_REMOVE(pp, provider);
    gp = pp->geom;
    devstat_remove_entry(pp->stat);
    /*
     * If a callback was provided, send notification that the provider
     * is now gone.
     */
    if (gp->providergone != NULL)
        gp->providergone(pp);
    LIST_FOREACH_SAFE(gap, &pp->aliases, ga_next, gaptmp)
        g_free(gap);
    g_free(pp);
    if ((gp->flags & G_GEOM_WITHER))
        g_do_wither();
}

/*
 * We keep the "geoms" list sorted by topological order (== increasing
 * numerical rank) at all times.
 * When an attach is done, the attaching geom's rank is invalidated
 * and it is moved to the tail of the list.
 * All geoms later in the sequence have their ranks reevaluated in
 * sequence.  If we cannot assign a rank to a geom because its
 * prerequisites do not have rank, we move that element to the tail
 * of the sequence with invalid rank as well.
 * At some point we encounter our original geom and if we still fail
 * to assign it a rank, there must be a loop and we fail back to
 * g_attach() which detaches again and calls redo_rank again
 * to fix up the damage.
 * It would be much simpler code-wise to do it recursively, but we
 * can't risk that on the kernel stack.
 */
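
/*
 * Worked example (hypothetical stack; illustrative only): a disk geom
 * with no consumers keeps rank 1.  A partition geom whose consumer
 * attaches to the disk's provider gets rank max(1) + 1 = 2, and a
 * mirror on top of two such partitions gets rank max(2, 2) + 1 = 3.
 * A rank of 0 below means "not yet computed".
 */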

static int
redo_rank(struct g_geom *gp)
{
    struct g_consumer *cp;
    struct g_geom *gp1, *gp2;
    int n, m;

    g_topology_assert();
    G_VALID_GEOM(gp);

    /* Invalidate this geom's rank and move it to the tail */
    gp1 = TAILQ_NEXT(gp, geoms);
    if (gp1 != NULL) {
        gp->rank = 0;
        TAILQ_REMOVE(&geoms, gp, geoms);
        TAILQ_INSERT_TAIL(&geoms, gp, geoms);
    } else {
        gp1 = gp;
    }

    /* re-rank the rest of the sequence */
    for (; gp1 != NULL; gp1 = gp2) {
        gp1->rank = 0;
        m = 1;
        LIST_FOREACH(cp, &gp1->consumer, consumer) {
            if (cp->provider == NULL)
                continue;
            n = cp->provider->geom->rank;
            if (n == 0) {
                m = 0;
                break;
            } else if (n >= m)
                m = n + 1;
        }
        gp1->rank = m;
        gp2 = TAILQ_NEXT(gp1, geoms);

        /* got a rank, moving on */
        if (m != 0)
            continue;

        /* no rank to original geom means loop */
        if (gp == gp1)
            return (ELOOP);

        /* no rank, put it at the end and move on */
        TAILQ_REMOVE(&geoms, gp1, geoms);
        TAILQ_INSERT_TAIL(&geoms, gp1, geoms);
    }
    return (0);
}

int
g_attach(struct g_consumer *cp, struct g_provider *pp)
{
    int error;

    g_topology_assert();
    G_VALID_CONSUMER(cp);
    G_VALID_PROVIDER(pp);
    g_trace(G_T_TOPOLOGY, "g_attach(%p, %p)", cp, pp);
    KASSERT(cp->provider == NULL, ("attach but attached"));
    if ((pp->flags & (G_PF_ORPHAN | G_PF_WITHER)) != 0)
        return (ENXIO);
    cp->provider = pp;
    cp->flags &= ~G_CF_ORPHAN;
    LIST_INSERT_HEAD(&pp->consumers, cp, consumers);
    error = redo_rank(cp->geom);
    if (error) {
        LIST_REMOVE(cp, consumers);
        cp->provider = NULL;
        redo_rank(cp->geom);
    }
    return (error);
}
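
/*
 * Teardown sketch (illustrative only): the inverse sequence a class
 * uses when it is done with a provider; the access counts must be zero
 * before detaching (see the KASSERTs in g_detach() below):
 *
 *     if (cp->acr != 0 || cp->acw != 0 || cp->ace != 0)
 *         g_access(cp, -cp->acr, -cp->acw, -cp->ace);
 *     g_detach(cp);
 *     g_destroy_consumer(cp);
 */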

void
g_detach(struct g_consumer *cp)
{
    struct g_provider *pp;

    g_topology_assert();
    G_VALID_CONSUMER(cp);
    g_trace(G_T_TOPOLOGY, "g_detach(%p)", cp);
    KASSERT(cp->provider != NULL, ("detach but not attached"));
    KASSERT(cp->acr == 0, ("detach but nonzero acr"));
    KASSERT(cp->acw == 0, ("detach but nonzero acw"));
    KASSERT(cp->ace == 0, ("detach but nonzero ace"));
    KASSERT(cp->nstart == cp->nend,
        ("detach with active requests"));
    pp = cp->provider;
    LIST_REMOVE(cp, consumers);
    cp->provider = NULL;
    if ((cp->geom->flags & G_GEOM_WITHER) ||
        (pp->geom->flags & G_GEOM_WITHER) ||
        (pp->flags & G_PF_WITHER))
        g_do_wither();
    redo_rank(cp->geom);
}

/*
 * g_access()
 *
 * Access-check with delta values.  The question asked is "can consumer
 * "cp" change the access counters by the relative amounts dc[rwe]?"
 */

int
g_access(struct g_consumer *cp, int dcr, int dcw, int dce)
{
    struct g_provider *pp;
    struct g_geom *gp;
    int pw, pe;
#ifdef INVARIANTS
    int sr, sw, se;
#endif
    int error;

    g_topology_assert();
    G_VALID_CONSUMER(cp);
    pp = cp->provider;
    KASSERT(pp != NULL, ("access but not attached"));
    G_VALID_PROVIDER(pp);
    gp = pp->geom;

    g_trace(G_T_ACCESS, "g_access(%p(%s), %d, %d, %d)",
        cp, pp->name, dcr, dcw, dce);

    KASSERT(cp->acr + dcr >= 0, ("access resulting in negative acr"));
    KASSERT(cp->acw + dcw >= 0, ("access resulting in negative acw"));
    KASSERT(cp->ace + dce >= 0, ("access resulting in negative ace"));
    KASSERT(dcr != 0 || dcw != 0 || dce != 0, ("NOP access request"));
    KASSERT(cp->acr + dcr != 0 || cp->acw + dcw != 0 ||
        cp->ace + dce != 0 || cp->nstart == cp->nend,
        ("Last close with active requests"));
    KASSERT(gp->access != NULL, ("NULL geom->access"));

    /*
     * If our class cares about being spoiled, and we have been, we
     * are probably just ahead of the event telling us that.  Fail
     * now rather than having to unravel this later.
     */
    if (cp->geom->spoiled != NULL && (cp->flags & G_CF_SPOILED) &&
        (dcr > 0 || dcw > 0 || dce > 0))
        return (ENXIO);

    /*
     * A number of GEOM classes either need to perform an I/O on the first
     * open or to acquire a different subsystem's lock.  To do that they
     * may have to drop the topology lock.
     * Other GEOM classes perform special actions when opening a lower rank
     * geom for the first time.  As a result, more than one thread may
     * end up performing the special actions.
     * So, we prevent concurrent "first" opens by marking the consumer with
     * a special flag.
     *
     * Note that if the geom's access method never drops the topology lock,
     * then we will never see G_GEOM_IN_ACCESS here.
     */
    while ((gp->flags & G_GEOM_IN_ACCESS) != 0) {
        g_trace(G_T_ACCESS,
            "%s: race on geom %s via provider %s and consumer of %s",
            __func__, gp->name, pp->name, cp->geom->name);
        gp->flags |= G_GEOM_ACCESS_WAIT;
        g_topology_sleep(gp, 0);
    }

    /*
     * Figure out what counts the provider would have had, if this
     * consumer had (r0w0e0) at this time.
     */
    pw = pp->acw - cp->acw;
    pe = pp->ace - cp->ace;

    g_trace(G_T_ACCESS,
        "open delta:[r%dw%de%d] old:[r%dw%de%d] provider:[r%dw%de%d] %p(%s)",
        dcr, dcw, dce,
        cp->acr, cp->acw, cp->ace,
        pp->acr, pp->acw, pp->ace,
        pp, pp->name);

    /* If foot-shooting is enabled, any open on rank#1 is OK */
    if ((g_debugflags & G_F_FOOTSHOOTING) && gp->rank == 1)
        ;
    /* If we try exclusive but already write: fail */
    else if (dce > 0 && pw > 0)
        return (EPERM);
    /* If we try write but already exclusive: fail */
    else if (dcw > 0 && pe > 0)
        return (EPERM);
    /* If we try to open more but provider is error'ed: fail */
    else if ((dcr > 0 || dcw > 0 || dce > 0) && pp->error != 0) {
        printf("%s(%d): provider %s has error %d set\n",
            __func__, __LINE__, pp->name, pp->error);
        return (pp->error);
    }

    /* Ok then... */

#ifdef INVARIANTS
    sr = cp->acr;
    sw = cp->acw;
    se = cp->ace;
#endif
    gp->flags |= G_GEOM_IN_ACCESS;
    error = gp->access(pp, dcr, dcw, dce);
    KASSERT(dcr > 0 || dcw > 0 || dce > 0 || error == 0,
        ("Geom provider %s::%s dcr=%d dcw=%d dce=%d error=%d failed "
        "closing ->access()", gp->class->name, pp->name, dcr, dcw,
        dce, error));

    g_topology_assert();
    gp->flags &= ~G_GEOM_IN_ACCESS;
    KASSERT(cp->acr == sr && cp->acw == sw && cp->ace == se,
        ("Access counts changed during geom->access"));
    if ((gp->flags & G_GEOM_ACCESS_WAIT) != 0) {
        gp->flags &= ~G_GEOM_ACCESS_WAIT;
        wakeup(gp);
    }

    if (!error) {
        /*
         * If we open first write, spoil any partner consumers.
         * If we close last write and provider is not errored,
         * trigger re-taste.
         */
        if (pp->acw == 0 && dcw != 0)
            g_spoil(pp, cp);
        else if (pp->acw != 0 && pp->acw == -dcw && pp->error == 0 &&
            !(gp->flags & G_GEOM_WITHER))
            g_post_event(g_new_provider_event, pp, M_WAITOK,
                pp, NULL);

        pp->acr += dcr;
        pp->acw += dcw;
        pp->ace += dce;
        cp->acr += dcr;
        cp->acw += dcw;
        cp->ace += dce;
        if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)
            KASSERT(pp->sectorsize > 0,
                ("Provider %s lacks sectorsize", pp->name));
        if ((cp->geom->flags & G_GEOM_WITHER) &&
            cp->acr == 0 && cp->acw == 0 && cp->ace == 0)
            g_do_wither();
    }
    return (error);
}
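
/*
 * Usage sketch (illustrative only): opening a consumer read-only, e.g.
 * for a metadata read, then dropping the access again.  Per the KASSERT
 * above, a pure close (no positive delta) may not fail:
 *
 *     error = g_access(cp, 1, 0, 0);        // r1w0e0
 *     if (error == 0) {
 *         // ... issue I/O, e.g. via g_read_data() ...
 *         g_access(cp, -1, 0, 0);           // back to r0w0e0
 *     }
 */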

int
g_handleattr_int(struct bio *bp, const char *attribute, int val)
{

    return (g_handleattr(bp, attribute, &val, sizeof val));
}

int
g_handleattr_uint16_t(struct bio *bp, const char *attribute, uint16_t val)
{

    return (g_handleattr(bp, attribute, &val, sizeof val));
}

int
g_handleattr_off_t(struct bio *bp, const char *attribute, off_t val)
{

    return (g_handleattr(bp, attribute, &val, sizeof val));
}

int
g_handleattr_str(struct bio *bp, const char *attribute, const char *str)
{

    return (g_handleattr(bp, attribute, str, 0));
}

int
g_handleattr(struct bio *bp, const char *attribute, const void *val, int len)
{
    int error = 0;

    if (strcmp(bp->bio_attribute, attribute))
        return (0);
    if (len == 0) {
        bzero(bp->bio_data, bp->bio_length);
        if (strlcpy(bp->bio_data, val, bp->bio_length) >=
            bp->bio_length) {
            printf("%s: %s %s bio_length %jd strlen %zu -> EFAULT\n",
                __func__, bp->bio_to->name, attribute,
                (intmax_t)bp->bio_length, strlen(val));
            error = EFAULT;
        }
    } else if (bp->bio_length == len) {
        bcopy(val, bp->bio_data, len);
    } else {
        printf("%s: %s %s bio_length %jd len %d -> EFAULT\n", __func__,
            bp->bio_to->name, attribute, (intmax_t)bp->bio_length, len);
        error = EFAULT;
    }
    if (error == 0)
        bp->bio_completed = bp->bio_length;
    g_io_deliver(bp, error);
    return (1);
}
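
/*
 * Usage sketch (illustrative only; "ident" is a hypothetical buffer):
 * a class's ->start() method typically answers BIO_GETATTR with these
 * helpers; a non-zero return means the bio was consumed and delivered:
 *
 *     case BIO_GETATTR:
 *         if (g_handleattr_int(bp, "GEOM::candelete", 1))
 *             return;
 *         if (g_handleattr_str(bp, "GEOM::ident", ident))
 *             return;
 *         g_io_deliver(bp, EOPNOTSUPP);
 *         return;
 */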

int
g_std_access(struct g_provider *pp,
    int dr __unused, int dw __unused, int de __unused)
{

    g_topology_assert();
    G_VALID_PROVIDER(pp);
    return (0);
}

void
g_std_done(struct bio *bp)
{
    struct bio *bp2;

    bp2 = bp->bio_parent;
    if (bp2->bio_error == 0)
        bp2->bio_error = bp->bio_error;
    bp2->bio_completed += bp->bio_completed;
    g_destroy_bio(bp);
    bp2->bio_inbed++;
    if (bp2->bio_children == bp2->bio_inbed) {
        if (bp2->bio_cmd == BIO_SPEEDUP)
            bp2->bio_completed = bp2->bio_length;
        g_io_deliver(bp2, bp2->bio_error);
    }
}
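
/*
 * Usage sketch (illustrative only): a pass-through ->start() method
 * clones the incoming bio, points the clone's completion routine at
 * g_std_done() above, and forwards it to the consumer below;
 * g_std_done() then aggregates the children back into the parent:
 *
 *     bp2 = g_clone_bio(bp);
 *     if (bp2 == NULL) {
 *         g_io_deliver(bp, ENOMEM);
 *         return;
 *     }
 *     bp2->bio_done = g_std_done;
 *     g_io_request(bp2, cp);
 */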

/* XXX: maybe this is only g_slice_spoiled */

void
g_std_spoiled(struct g_consumer *cp)
{
    struct g_geom *gp;
    struct g_provider *pp;

    g_topology_assert();
    G_VALID_CONSUMER(cp);
    g_trace(G_T_TOPOLOGY, "g_std_spoiled(%p)", cp);
    cp->flags |= G_CF_ORPHAN;
    g_detach(cp);
    gp = cp->geom;
    LIST_FOREACH(pp, &gp->provider, provider)
        g_orphan_provider(pp, ENXIO);
    g_destroy_consumer(cp);
    if (LIST_EMPTY(&gp->provider) && LIST_EMPTY(&gp->consumer))
        g_destroy_geom(gp);
    else
        gp->flags |= G_GEOM_WITHER;
}

/*
 * Spoiling happens when a provider is opened for writing, but consumers
 * which are configured by in-band data are attached (slicers for instance).
 * Since the write might potentially change the in-band data, such consumers
 * need to re-evaluate their existence after the writing session closes.
 * We do this by (offering to) tear them down when the open for write happens
 * in return for a re-taste when it closes again.
 * Together with the fact that such consumers grab an 'e' bit whenever they
 * are open, regardless of mode, this ends up DTRT.
 */
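
/*
 * Example timeline (hypothetical names; illustrative only): a partition
 * class geom sits on top of disk "da0".  When da0 itself is opened for
 * writing, g_access() calls g_spoil(), which marks the partition geom's
 * consumer G_CF_SPOILED; g_spoil_event() below then runs its ->spoiled
 * method (often g_std_spoiled() above), tearing the slices down.  When
 * the write open is later closed, g_access() posts
 * g_new_provider_event() and the partitions are re-tasted from the
 * (possibly rewritten) on-disk metadata.
 */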

static void
g_spoil_event(void *arg, int flag)
{
    struct g_provider *pp;
    struct g_consumer *cp, *cp2;

    g_topology_assert();
    if (flag == EV_CANCEL)
        return;
    pp = arg;
    G_VALID_PROVIDER(pp);
    g_trace(G_T_TOPOLOGY, "%s %p(%s:%s:%s)", __func__, pp,
        pp->geom->class->name, pp->geom->name, pp->name);
    for (cp = LIST_FIRST(&pp->consumers); cp != NULL; cp = cp2) {
        cp2 = LIST_NEXT(cp, consumers);
        if ((cp->flags & G_CF_SPOILED) == 0)
            continue;
        cp->flags &= ~G_CF_SPOILED;
        if (cp->geom->spoiled == NULL)
            continue;
        cp->geom->spoiled(cp);
        g_topology_assert();
    }
}

void
g_spoil(struct g_provider *pp, struct g_consumer *cp)
{
    struct g_consumer *cp2;

    g_topology_assert();
    G_VALID_PROVIDER(pp);
    G_VALID_CONSUMER(cp);

    LIST_FOREACH(cp2, &pp->consumers, consumers) {
        if (cp2 == cp)
            continue;
        /*
        KASSERT(cp2->acr == 0, ("spoiling cp->acr = %d", cp2->acr));
        KASSERT(cp2->acw == 0, ("spoiling cp->acw = %d", cp2->acw));
        */
        KASSERT(cp2->ace == 0, ("spoiling cp->ace = %d", cp2->ace));
        cp2->flags |= G_CF_SPOILED;
    }
    g_post_event(g_spoil_event, pp, M_WAITOK, pp, NULL);
}

static void
g_media_changed_event(void *arg, int flag)
{
    struct g_provider *pp;
    int retaste;

    g_topology_assert();
    if (flag == EV_CANCEL)
        return;
    pp = arg;
    G_VALID_PROVIDER(pp);

    /*
     * If provider was not open for writing, queue retaste after spoiling.
     * If it was, retaste will happen automatically on close.
     */
    retaste = (pp->acw == 0 && pp->error == 0 &&
        !(pp->geom->flags & G_GEOM_WITHER));
    g_spoil_event(arg, flag);
    if (retaste)
        g_post_event(g_new_provider_event, pp, M_WAITOK, pp, NULL);
}

int
g_media_changed(struct g_provider *pp, int flag)
{
    struct g_consumer *cp;

    LIST_FOREACH(cp, &pp->consumers, consumers)
        cp->flags |= G_CF_SPOILED;
    return (g_post_event(g_media_changed_event, pp, flag, pp, NULL));
}

int
g_media_gone(struct g_provider *pp, int flag)
{
    struct g_consumer *cp;

    LIST_FOREACH(cp, &pp->consumers, consumers)
        cp->flags |= G_CF_SPOILED;
    return (g_post_event(g_spoil_event, pp, flag, pp, NULL));
}
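
/*
 * Note (illustrative; driver specifics vary): removable-media drivers
 * are the intended callers here.  Such a driver would typically call
 * g_media_changed(pp, M_WAITOK) when new media appears, so spoiled
 * consumers are torn down and the provider is re-tasted, and
 * g_media_gone(pp, M_WAITOK) on ejection, where spoiling alone is
 * enough because there is nothing left to taste.
 */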

int
g_getattr__(const char *attr, struct g_consumer *cp, void *var, int len)
{
    int error, i;

    i = len;
    error = g_io_getattr(attr, cp, &i, var);
    if (error)
        return (error);
    if (i != len)
        return (EINVAL);
    return (0);
}

static int
g_get_device_prefix_len(const char *name)
{
    int len;

    if (strncmp(name, "ada", 3) == 0)
        len = 3;
    else if (strncmp(name, "ad", 2) == 0)
        len = 2;
    else
        return (0);
    if (name[len] < '0' || name[len] > '9')
        return (0);
    do {
        len++;
    } while (name[len] >= '0' && name[len] <= '9');
    return (len);
}

int
g_compare_names(const char *namea, const char *nameb)
{
    int deva, devb;

    if (strcmp(namea, nameb) == 0)
        return (1);
    deva = g_get_device_prefix_len(namea);
    if (deva == 0)
        return (0);
    devb = g_get_device_prefix_len(nameb);
    if (devb == 0)
        return (0);
    if (strcmp(namea + deva, nameb + devb) == 0)
        return (1);
    return (0);
}
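
/*
 * Example (illustrative only): the device prefix includes the unit
 * number, so g_compare_names("ad4s1a", "ada4s1a") strips "ad4" and
 * "ada4" and compares the remaining "s1a" suffixes, returning 1.  This
 * lets the same disk match under its legacy ata(4) and CAM-based ada(4)
 * names, whose unit numbers may differ.
 */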

#if defined(DIAGNOSTIC) || defined(DDB)
/*
 * This function walks the mesh and returns a non-zero integer if it
 * finds the argument pointer is an object.  The return value indicates
 * which type of object it is believed to be.  If topology is not locked,
 * this function is potentially dangerous, but we don't assert that the
 * topology lock is held when called from the debugger.
 */
int
g_valid_obj(void const *ptr)
{
    struct g_class *mp;
    struct g_geom *gp;
    struct g_consumer *cp;
    struct g_provider *pp;

#ifdef KDB
    if (kdb_active == 0)
#endif
        g_topology_assert();

    LIST_FOREACH(mp, &g_classes, class) {
        if (ptr == mp)
            return (1);
        LIST_FOREACH(gp, &mp->geom, geom) {
            if (ptr == gp)
                return (2);
            LIST_FOREACH(cp, &gp->consumer, consumer)
                if (ptr == cp)
                    return (3);
            LIST_FOREACH(pp, &gp->provider, provider)
                if (ptr == pp)
                    return (4);
        }
    }
    return (0);
}
#endif

#ifdef DDB

#define gprintf(...) do { \
    db_printf("%*s", indent, ""); \
    db_printf(__VA_ARGS__); \
} while (0)
#define gprintln(...) do { \
    gprintf(__VA_ARGS__); \
    db_printf("\n"); \
} while (0)

#define ADDFLAG(obj, flag, sflag) do { \
    if ((obj)->flags & (flag)) { \
        if (comma) \
            strlcat(str, ",", size); \
        strlcat(str, (sflag), size); \
        comma = 1; \
    } \
} while (0)

static char *
provider_flags_to_string(struct g_provider *pp, char *str, size_t size)
{
    int comma = 0;

    bzero(str, size);
    if (pp->flags == 0) {
        strlcpy(str, "NONE", size);
        return (str);
    }
    ADDFLAG(pp, G_PF_WITHER, "G_PF_WITHER");
    ADDFLAG(pp, G_PF_ORPHAN, "G_PF_ORPHAN");
    return (str);
}

static char *
geom_flags_to_string(struct g_geom *gp, char *str, size_t size)
{
    int comma = 0;

    bzero(str, size);
    if (gp->flags == 0) {
        strlcpy(str, "NONE", size);
        return (str);
    }
    ADDFLAG(gp, G_GEOM_WITHER, "G_GEOM_WITHER");
    return (str);
}

static void
db_show_geom_consumer(int indent, struct g_consumer *cp)
{

    if (indent == 0) {
        gprintln("consumer: %p", cp);
        gprintln(" class: %s (%p)", cp->geom->class->name,
            cp->geom->class);
        gprintln(" geom: %s (%p)", cp->geom->name, cp->geom);
        if (cp->provider == NULL)
            gprintln(" provider: none");
        else {
            gprintln(" provider: %s (%p)", cp->provider->name,
                cp->provider);
        }
        gprintln(" access: r%dw%de%d", cp->acr, cp->acw, cp->ace);
        gprintln(" flags: 0x%04x", cp->flags);
#ifdef INVARIANTS
        gprintln(" nstart: %u", cp->nstart);
        gprintln(" nend: %u", cp->nend);
#endif
    } else {
        gprintf("consumer: %p (%s), access=r%dw%de%d", cp,
            cp->provider != NULL ? cp->provider->name : "none",
            cp->acr, cp->acw, cp->ace);
        if (cp->flags)
            db_printf(", flags=0x%04x", cp->flags);
        db_printf("\n");
    }
}

static void
db_show_geom_provider(int indent, struct g_provider *pp)
{
    struct g_consumer *cp;
    char flags[64];

    if (indent == 0) {
        gprintln("provider: %s (%p)", pp->name, pp);
        gprintln(" class: %s (%p)", pp->geom->class->name,
            pp->geom->class);
        gprintln(" geom: %s (%p)", pp->geom->name, pp->geom);
        gprintln(" mediasize: %jd", (intmax_t)pp->mediasize);
        gprintln(" sectorsize: %u", pp->sectorsize);
        gprintln(" stripesize: %ju", (uintmax_t)pp->stripesize);
        gprintln(" stripeoffset: %ju", (uintmax_t)pp->stripeoffset);
        gprintln(" access: r%dw%de%d", pp->acr, pp->acw,
            pp->ace);
        gprintln(" flags: %s (0x%04x)",
            provider_flags_to_string(pp, flags, sizeof(flags)),
            pp->flags);
        gprintln(" error: %d", pp->error);
        if (LIST_EMPTY(&pp->consumers))
            gprintln(" consumers: none");
    } else {
        gprintf("provider: %s (%p), access=r%dw%de%d",
            pp->name, pp, pp->acr, pp->acw, pp->ace);
        if (pp->flags != 0) {
            db_printf(", flags=%s (0x%04x)",
                provider_flags_to_string(pp, flags, sizeof(flags)),
                pp->flags);
        }
        db_printf("\n");
    }
    if (!LIST_EMPTY(&pp->consumers)) {
        LIST_FOREACH(cp, &pp->consumers, consumers) {
            db_show_geom_consumer(indent + 2, cp);
            if (db_pager_quit)
                break;
        }
    }
}

static void
db_show_geom_geom(int indent, struct g_geom *gp)
{
    struct g_provider *pp;
    struct g_consumer *cp;
    char flags[64];

    if (indent == 0) {
        gprintln("geom: %s (%p)", gp->name, gp);
        gprintln(" class: %s (%p)", gp->class->name, gp->class);
        gprintln(" flags: %s (0x%04x)",
            geom_flags_to_string(gp, flags, sizeof(flags)), gp->flags);
        gprintln(" rank: %d", gp->rank);
        if (LIST_EMPTY(&gp->provider))
            gprintln(" providers: none");
        if (LIST_EMPTY(&gp->consumer))
            gprintln(" consumers: none");
    } else {
        gprintf("geom: %s (%p), rank=%d", gp->name, gp, gp->rank);
        if (gp->flags != 0) {
            db_printf(", flags=%s (0x%04x)",
                geom_flags_to_string(gp, flags, sizeof(flags)),
                gp->flags);
        }
        db_printf("\n");
    }
    if (!LIST_EMPTY(&gp->provider)) {
        LIST_FOREACH(pp, &gp->provider, provider) {
            db_show_geom_provider(indent + 2, pp);
            if (db_pager_quit)
                break;
        }
    }
    if (!LIST_EMPTY(&gp->consumer)) {
        LIST_FOREACH(cp, &gp->consumer, consumer) {
            db_show_geom_consumer(indent + 2, cp);
            if (db_pager_quit)
                break;
        }
    }
}

static void
db_show_geom_class(struct g_class *mp)
{
    struct g_geom *gp;

    db_printf("class: %s (%p)\n", mp->name, mp);
    LIST_FOREACH(gp, &mp->geom, geom) {
        db_show_geom_geom(2, gp);
        if (db_pager_quit)
            break;
    }
}

/*
 * Print the GEOM topology or the given object.
 */
DB_SHOW_COMMAND(geom, db_show_geom)
{
    struct g_class *mp;

    if (!have_addr) {
        /* No address given, print the entire topology. */
        LIST_FOREACH(mp, &g_classes, class) {
            db_show_geom_class(mp);
            db_printf("\n");
            if (db_pager_quit)
                break;
        }
    } else {
        switch (g_valid_obj((void *)addr)) {
        case 1:
            db_show_geom_class((struct g_class *)addr);
            break;
        case 2:
            db_show_geom_geom(0, (struct g_geom *)addr);
            break;
        case 3:
            db_show_geom_consumer(0, (struct g_consumer *)addr);
            break;
        case 4:
            db_show_geom_provider(0, (struct g_provider *)addr);
            break;
        default:
            db_printf("Not a GEOM object.\n");
            break;
        }
    }
}

static void
db_print_bio_cmd(struct bio *bp)
{
    db_printf(" cmd: ");
    switch (bp->bio_cmd) {
    case BIO_READ: db_printf("BIO_READ"); break;
    case BIO_WRITE: db_printf("BIO_WRITE"); break;
    case BIO_DELETE: db_printf("BIO_DELETE"); break;
    case BIO_GETATTR: db_printf("BIO_GETATTR"); break;
    case BIO_FLUSH: db_printf("BIO_FLUSH"); break;
    case BIO_CMD0: db_printf("BIO_CMD0"); break;
    case BIO_CMD1: db_printf("BIO_CMD1"); break;
    case BIO_CMD2: db_printf("BIO_CMD2"); break;
    case BIO_ZONE: db_printf("BIO_ZONE"); break;
    default: db_printf("UNKNOWN"); break;
    }
    db_printf("\n");
}

static void
db_print_bio_flags(struct bio *bp)
{
    int comma;

    comma = 0;
    db_printf(" flags: ");
    if (bp->bio_flags & BIO_ERROR) {
        db_printf("BIO_ERROR");
        comma = 1;
    }
    if (bp->bio_flags & BIO_DONE) {
        db_printf("%sBIO_DONE", (comma ? ", " : ""));
        comma = 1;
    }
    if (bp->bio_flags & BIO_ONQUEUE)
        db_printf("%sBIO_ONQUEUE", (comma ? ", " : ""));
    db_printf("\n");
}

/*
 * Print useful information in a BIO
 */
DB_SHOW_COMMAND(bio, db_show_bio)
{
    struct bio *bp;

    if (have_addr) {
        bp = (struct bio *)addr;
        db_printf("BIO %p\n", bp);
        db_print_bio_cmd(bp);
        db_print_bio_flags(bp);
        db_printf(" cflags: 0x%hx\n", bp->bio_cflags);
        db_printf(" pflags: 0x%hx\n", bp->bio_pflags);
        db_printf(" offset: %jd\n", (intmax_t)bp->bio_offset);
        db_printf(" length: %jd\n", (intmax_t)bp->bio_length);
        db_printf(" bcount: %ld\n", bp->bio_bcount);
        db_printf(" resid: %ld\n", bp->bio_resid);
        db_printf(" completed: %jd\n", (intmax_t)bp->bio_completed);
        db_printf(" children: %u\n", bp->bio_children);
        db_printf(" inbed: %u\n", bp->bio_inbed);
        db_printf(" error: %d\n", bp->bio_error);
        db_printf(" parent: %p\n", bp->bio_parent);
        db_printf(" driver1: %p\n", bp->bio_driver1);
        db_printf(" driver2: %p\n", bp->bio_driver2);
        db_printf(" caller1: %p\n", bp->bio_caller1);
        db_printf(" caller2: %p\n", bp->bio_caller2);
        db_printf(" bio_from: %p\n", bp->bio_from);
        db_printf(" bio_to: %p\n", bp->bio_to);

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
        db_printf(" bio_track_bp: %p\n", bp->bio_track_bp);
#endif
    }
}

#undef gprintf
#undef gprintln
#undef ADDFLAG

#endif /* DDB */