/*	$NetBSD: svc_dg.c,v 1.4 2000/07/06 03:10:35 christos Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of Sun Microsystems, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1986-1991 by Sun Microsystems Inc.
 */

#if defined(LIBC_SCCS) && !defined(lint)
#ident	"@(#)svc_dg.c	1.17	94/04/24 SMI"
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * svc_dg.c, Server side for connectionless RPC.
 *
 * Does some caching in the hopes of achieving execute-at-most-once semantics.
 */

#include "namespace.h"
#include "reentrant.h"
#include <sys/types.h>
#include <sys/socket.h>
#include <rpc/rpc.h>
#include <rpc/svc_dg.h>
#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef RPC_CACHE_DEBUG
#include <netconfig.h>
#include <netdir.h>
#endif
#include <err.h>
#include "un-namespace.h"

#include "rpc_com.h"
#include "mt_misc.h"

#define	su_data(xprt)	((struct svc_dg_data *)((xprt)->xp_p2))
#define	rpc_buffer(xprt) ((xprt)->xp_p1)

#ifndef MAX
#define	MAX(a, b)	(((a) > (b)) ? (a) : (b))
#endif

static void svc_dg_ops(SVCXPRT *);
static enum xprt_stat svc_dg_stat(SVCXPRT *);
static bool_t svc_dg_recv(SVCXPRT *, struct rpc_msg *);
static bool_t svc_dg_reply(SVCXPRT *, struct rpc_msg *);
static bool_t svc_dg_getargs(SVCXPRT *, xdrproc_t, void *);
static bool_t svc_dg_freeargs(SVCXPRT *, xdrproc_t, void *);
static void svc_dg_destroy(SVCXPRT *);
static bool_t svc_dg_control(SVCXPRT *, const u_int, void *);
static int cache_get(SVCXPRT *, struct rpc_msg *, char **, size_t *);
static void cache_set(SVCXPRT *, size_t);
int svc_dg_enablecache(SVCXPRT *, u_int);

/*
 * Usage:
 *	xprt = svc_dg_create(sock, sendsize, recvsize);
 * Does other connectionless-specific initializations.
 * Once *xprt is initialized, it is registered;
 * see xprt_register() in svc.h.  If recvsize or sendsize is 0,
 * suitable system defaults are chosen.
 * The routine returns NULL if a problem occurred.
 */
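/*
 * Illustrative sketch only (not part of the library): a typical caller
 * creates a bound datagram socket, wraps it with svc_dg_create(), and
 * registers a dispatch routine.  MYPROG, MYVERS and my_dispatch() below
 * are hypothetical names standing in for the application's own values.
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	// ... bind(2) fd to the desired local address/port ...
 *	SVCXPRT *xprt = svc_dg_create(fd, 0, 0);	// 0: use system defaults
 *	if (xprt == NULL)
 *		errx(1, "svc_dg_create failed");
 *	// NULL netconfig: register the dispatcher without touching rpcbind.
 *	if (!svc_reg(xprt, MYPROG, MYVERS, my_dispatch, NULL))
 *		errx(1, "svc_reg failed");
 *	svc_run();	// does not return
 */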
static const char svc_dg_str[] = "svc_dg_create: %s";
static const char svc_dg_err1[] = "could not get transport information";
static const char svc_dg_err2[] = "transport does not support data transfer";
static const char svc_dg_err3[] = "getsockname failed";
static const char svc_dg_err4[] = "cannot set IP_RECVDSTADDR";
static const char __no_mem_str[] = "out of memory";

SVCXPRT *
svc_dg_create(int fd, u_int sendsize, u_int recvsize)
{
	SVCXPRT *xprt;
	struct svc_dg_data *su = NULL;
	struct __rpc_sockinfo si;
	struct sockaddr_storage ss;
	socklen_t slen;

	if (!__rpc_fd2sockinfo(fd, &si)) {
		warnx(svc_dg_str, svc_dg_err1);
		return (NULL);
	}
	/*
	 * Find the receive and the send size
	 */
	sendsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsize);
	recvsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsize);
	if ((sendsize == 0) || (recvsize == 0)) {
		warnx(svc_dg_str, svc_dg_err2);
		return (NULL);
	}

	xprt = svc_xprt_alloc();
	if (xprt == NULL)
		goto freedata;

	su = mem_alloc(sizeof (*su));
	if (su == NULL)
		goto freedata;
	su->su_iosz = ((MAX(sendsize, recvsize) + 3) / 4) * 4;
	if ((rpc_buffer(xprt) = mem_alloc(su->su_iosz)) == NULL)
		goto freedata;
	xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt), su->su_iosz,
	    XDR_DECODE);
	su->su_cache = NULL;
	xprt->xp_fd = fd;
	xprt->xp_p2 = su;
	xprt->xp_verf.oa_base = su->su_verfbody;
	svc_dg_ops(xprt);
	xprt->xp_rtaddr.maxlen = sizeof (struct sockaddr_storage);

	slen = sizeof ss;
	if (_getsockname(fd, (struct sockaddr *)(void *)&ss, &slen) < 0) {
		warnx(svc_dg_str, svc_dg_err3);
		goto freedata_nowarn;
	}
	xprt->xp_ltaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
	xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_storage);
	xprt->xp_ltaddr.len = slen;
	memcpy(xprt->xp_ltaddr.buf, &ss, slen);

	if (ss.ss_family == AF_INET) {
		struct sockaddr_in *sin;
		static const int true_value = 1;

		sin = (struct sockaddr_in *)(void *)&ss;
		if (sin->sin_addr.s_addr == INADDR_ANY) {
			su->su_srcaddr.buf = mem_alloc(sizeof (ss));
			su->su_srcaddr.maxlen = sizeof (ss);

			if (_setsockopt(fd, IPPROTO_IP, IP_RECVDSTADDR,
			    &true_value, sizeof(true_value))) {
				warnx(svc_dg_str, svc_dg_err4);
				goto freedata_nowarn;
			}
		}
	}

	xprt_register(xprt);
	return (xprt);
freedata:
	(void) warnx(svc_dg_str, __no_mem_str);
freedata_nowarn:
	if (xprt) {
		if (su)
			(void) mem_free(su, sizeof (*su));
		svc_xprt_free(xprt);
	}
	return (NULL);
}

/*ARGSUSED*/
static enum xprt_stat
svc_dg_stat(SVCXPRT *xprt)
{
	return (XPRT_IDLE);
}

static int
svc_dg_recvfrom(int fd, char *buf, int buflen,
    struct sockaddr *raddr, socklen_t *raddrlen,
    struct sockaddr *laddr, socklen_t *laddrlen)
{
	struct msghdr msg;
	struct iovec msg_iov[1];
	struct sockaddr_in *lin = (struct sockaddr_in *)laddr;
	int rlen;
	bool_t have_lin = FALSE;
	char tmp[CMSG_LEN(sizeof(*lin))];
	struct cmsghdr *cmsg;

	memset((char *)&msg, 0, sizeof(msg));
	msg_iov[0].iov_base = buf;
	msg_iov[0].iov_len = buflen;
	msg.msg_iov = msg_iov;
	msg.msg_iovlen = 1;
	msg.msg_namelen = *raddrlen;
	msg.msg_name = (char *)raddr;
	if (laddr != NULL) {
		msg.msg_control = (caddr_t)tmp;
		msg.msg_controllen = CMSG_LEN(sizeof(*lin));
	}
	rlen = _recvmsg(fd, &msg, 0);
	if (rlen >= 0)
		*raddrlen = msg.msg_namelen;

	if (rlen == -1 || laddr == NULL ||
	    msg.msg_controllen < sizeof(struct cmsghdr) ||
	    msg.msg_flags & MSG_CTRUNC)
		return rlen;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
	    cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == IPPROTO_IP &&
		    cmsg->cmsg_type == IP_RECVDSTADDR) {
			have_lin = TRUE;
			memcpy(&lin->sin_addr,
			    (struct in_addr *)CMSG_DATA(cmsg),
			    sizeof(struct in_addr));
			break;
		}
	}

	lin->sin_family = AF_INET;
	lin->sin_port = 0;
	*laddrlen = sizeof(struct sockaddr_in);

	if (!have_lin)
		lin->sin_addr.s_addr = INADDR_ANY;

	return rlen;
}

static bool_t
svc_dg_recv(SVCXPRT *xprt, struct rpc_msg *msg)
{
	struct svc_dg_data *su = su_data(xprt);
	XDR *xdrs = &(su->su_xdrs);
	char *reply;
	struct sockaddr_storage ss;
	socklen_t alen;
	size_t replylen;
	ssize_t rlen;

again:
	alen = sizeof (struct sockaddr_storage);
	rlen = svc_dg_recvfrom(xprt->xp_fd, rpc_buffer(xprt), su->su_iosz,
	    (struct sockaddr *)(void *)&ss, &alen,
	    (struct sockaddr *)su->su_srcaddr.buf, &su->su_srcaddr.len);
	if (rlen == -1 && errno == EINTR)
		goto again;
	if (rlen == -1 || (rlen < (ssize_t)(4 * sizeof (u_int32_t))))
		return (FALSE);
	if (xprt->xp_rtaddr.len < alen) {
		if (xprt->xp_rtaddr.len != 0)
			mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.len);
		xprt->xp_rtaddr.buf = mem_alloc(alen);
		xprt->xp_rtaddr.len = alen;
	}
	memcpy(xprt->xp_rtaddr.buf, &ss, alen);
#ifdef PORTMAP
	if (ss.ss_family == AF_INET) {
		xprt->xp_raddr = *(struct sockaddr_in *)xprt->xp_rtaddr.buf;
		xprt->xp_addrlen = sizeof (struct sockaddr_in);
	}
#endif	/* PORTMAP */
	xdrs->x_op = XDR_DECODE;
	XDR_SETPOS(xdrs, 0);
	if (! xdr_callmsg(xdrs, msg)) {
		return (FALSE);
	}
	su->su_xid = msg->rm_xid;
	if (su->su_cache != NULL) {
		if (cache_get(xprt, msg, &reply, &replylen)) {
			(void)_sendto(xprt->xp_fd, reply, replylen, 0,
			    (struct sockaddr *)(void *)&ss, alen);
			return (FALSE);
		}
	}
	return (TRUE);
}

static int
svc_dg_sendto(int fd, char *buf, int buflen,
    const struct sockaddr *raddr, socklen_t raddrlen,
    const struct sockaddr *laddr, socklen_t laddrlen)
{
	struct msghdr msg;
	struct iovec msg_iov[1];
	struct sockaddr_in *laddr_in = (struct sockaddr_in *)laddr;
	struct in_addr *lin = &laddr_in->sin_addr;
	char tmp[CMSG_SPACE(sizeof(*lin))];
	struct cmsghdr *cmsg;

	memset((char *)&msg, 0, sizeof(msg));
	msg_iov[0].iov_base = buf;
	msg_iov[0].iov_len = buflen;
	msg.msg_iov = msg_iov;
	msg.msg_iovlen = 1;
	msg.msg_namelen = raddrlen;
	msg.msg_name = (char *)raddr;

	if (laddr != NULL && laddr->sa_family == AF_INET &&
	    lin->s_addr != INADDR_ANY) {
		msg.msg_control = (caddr_t)tmp;
		msg.msg_controllen = CMSG_LEN(sizeof(*lin));
		cmsg = CMSG_FIRSTHDR(&msg);
		cmsg->cmsg_len = CMSG_LEN(sizeof(*lin));
		cmsg->cmsg_level = IPPROTO_IP;
		cmsg->cmsg_type = IP_SENDSRCADDR;
		memcpy(CMSG_DATA(cmsg), lin, sizeof(*lin));
	}

	return _sendmsg(fd, &msg, 0);
}

static bool_t
svc_dg_reply(SVCXPRT *xprt, struct rpc_msg *msg)
{
	struct svc_dg_data *su = su_data(xprt);
	XDR *xdrs = &(su->su_xdrs);
	bool_t stat = TRUE;
	size_t slen;
	xdrproc_t xdr_proc;
	caddr_t xdr_where;

	xdrs->x_op = XDR_ENCODE;
	XDR_SETPOS(xdrs, 0);
	msg->rm_xid = su->su_xid;
	if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
	    msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
		xdr_proc = msg->acpted_rply.ar_results.proc;
		xdr_where = msg->acpted_rply.ar_results.where;
		msg->acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;
		msg->acpted_rply.ar_results.where = NULL;

		if (!xdr_replymsg(xdrs, msg) ||
		    !SVCAUTH_WRAP(&SVC_AUTH(xprt), xdrs, xdr_proc, xdr_where))
			stat = FALSE;
	} else {
		stat = xdr_replymsg(xdrs, msg);
	}
	if (stat) {
		slen = XDR_GETPOS(xdrs);
		if (svc_dg_sendto(xprt->xp_fd, rpc_buffer(xprt), slen,
		    (struct sockaddr *)xprt->xp_rtaddr.buf,
		    (socklen_t)xprt->xp_rtaddr.len,
		    (struct sockaddr *)su->su_srcaddr.buf,
		    (socklen_t)su->su_srcaddr.len) == (ssize_t) slen) {
			stat = TRUE;
			if (su->su_cache)
				cache_set(xprt, slen);
		}
	}
	return (stat);
}

static bool_t
svc_dg_getargs(SVCXPRT *xprt, xdrproc_t xdr_args, void *args_ptr)
{
	struct svc_dg_data *su;

	assert(xprt != NULL);
	su = su_data(xprt);
	return (SVCAUTH_UNWRAP(&SVC_AUTH(xprt),
	    &su->su_xdrs, xdr_args, args_ptr));
}

static bool_t
svc_dg_freeargs(SVCXPRT *xprt, xdrproc_t xdr_args, void *args_ptr)
{
	XDR *xdrs = &(su_data(xprt)->su_xdrs);

	xdrs->x_op = XDR_FREE;
	return (*xdr_args)(xdrs, args_ptr);
}

static void
svc_dg_destroy(SVCXPRT *xprt)
{
	struct svc_dg_data *su = su_data(xprt);

	xprt_unregister(xprt);
	if (xprt->xp_fd != -1)
		(void)_close(xprt->xp_fd);
	XDR_DESTROY(&(su->su_xdrs));
	(void) mem_free(rpc_buffer(xprt), su->su_iosz);
	if (su->su_srcaddr.buf)
		(void) mem_free(su->su_srcaddr.buf, su->su_srcaddr.maxlen);
	(void) mem_free(su, sizeof (*su));
	if (xprt->xp_rtaddr.buf)
		(void) mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.maxlen);
	if (xprt->xp_ltaddr.buf)
		(void) mem_free(xprt->xp_ltaddr.buf, xprt->xp_ltaddr.maxlen);
	free(xprt->xp_tp);
	svc_xprt_free(xprt);
}

static bool_t
/*ARGSUSED*/
svc_dg_control(SVCXPRT *xprt, const u_int rq, void *in)
{
	return (FALSE);
}

static void
svc_dg_ops(SVCXPRT *xprt)
{
	static struct xp_ops ops;
	static struct xp_ops2 ops2;

/* VARIABLES PROTECTED BY ops_lock: ops */

	mutex_lock(&ops_lock);
	if (ops.xp_recv == NULL) {
		ops.xp_recv = svc_dg_recv;
		ops.xp_stat = svc_dg_stat;
		ops.xp_getargs = svc_dg_getargs;
		ops.xp_reply = svc_dg_reply;
		ops.xp_freeargs = svc_dg_freeargs;
		ops.xp_destroy = svc_dg_destroy;
		ops2.xp_control = svc_dg_control;
	}
	xprt->xp_ops = &ops;
	xprt->xp_ops2 = &ops2;
	mutex_unlock(&ops_lock);
}

/* The CACHING COMPONENT */

/*
 * Could have been a separate file, but some part of it depends upon the
 * private structure of the client handle.
 *
 * FIFO cache for the connectionless (cl) server.
 * Copies pointers to reply buffers into the FIFO cache.
 * Buffers are sent again if retransmissions are detected.
 */
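/*
 * Illustrative sketch only (not part of the library): a server that wants
 * the replay behaviour described above turns the cache on after creating
 * the transport.  The size argument is the number of replies retained;
 * 128 below is an arbitrary example value.
 *
 *	if (!svc_dg_enablecache(xprt, 128))
 *		warnx("could not enable the duplicate-request cache");
 */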

#define	SPARSENESS 4	/* 75% sparse */

#define	ALLOC(type, size)	\
	(type *) mem_alloc((sizeof (type) * (size)))

#define	MEMZERO(addr, type, size)	\
	(void) memset((void *) (addr), 0, sizeof (type) * (int) (size))

#define	FREE(addr, type, size)	\
	mem_free((addr), (sizeof (type) * (size)))

/*
 * An entry in the cache
 */
typedef struct cache_node *cache_ptr;
struct cache_node {
	/*
	 * Index into cache is xid, proc, vers, prog and address
	 */
	u_int32_t cache_xid;
	rpcproc_t cache_proc;
	rpcvers_t cache_vers;
	rpcprog_t cache_prog;
	struct netbuf cache_addr;
	/*
	 * The cached reply and length
	 */
	char *cache_reply;
	size_t cache_replylen;
	/*
	 * Next node on the list, if there is a collision
	 */
	cache_ptr cache_next;
};

/*
 * The entire cache
 */
struct cl_cache {
	u_int uc_size;		/* size of cache */
	cache_ptr *uc_entries;	/* hash table of entries in cache */
	cache_ptr *uc_fifo;	/* fifo list of entries in cache */
	u_int uc_nextvictim;	/* points to next victim in fifo list */
	rpcprog_t uc_prog;	/* saved program number */
	rpcvers_t uc_vers;	/* saved version number */
	rpcproc_t uc_proc;	/* saved procedure number */
};


/*
 * the hashing function
 */
#define	CACHE_LOC(transp, xid)	\
	(xid % (SPARSENESS * ((struct cl_cache *) \
	    su_data(transp)->su_cache)->uc_size))
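/*
 * Worked example (values are hypothetical): with a cache created as
 * svc_dg_enablecache(xprt, 64), uc_size is 64 and the hash table holds
 * SPARSENESS * 64 = 256 buckets, so an xid of 0x12345 lands in bucket
 * 0x12345 % 256 = 0x45.
 */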

/*
 * Enable use of the cache. Returns 1 on success, 0 on failure.
 * Note: there is no disable.
 */
static const char cache_enable_str[] = "svc_enablecache: %s %s";
static const char alloc_err[] = "could not allocate cache ";
static const char enable_err[] = "cache already enabled";

int
svc_dg_enablecache(SVCXPRT *transp, u_int size)
{
	struct svc_dg_data *su = su_data(transp);
	struct cl_cache *uc;

	mutex_lock(&dupreq_lock);
	if (su->su_cache != NULL) {
		(void) warnx(cache_enable_str, enable_err, " ");
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	uc = ALLOC(struct cl_cache, 1);
	if (uc == NULL) {
		warnx(cache_enable_str, alloc_err, " ");
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	uc->uc_size = size;
	uc->uc_nextvictim = 0;
	uc->uc_entries = ALLOC(cache_ptr, size * SPARSENESS);
	if (uc->uc_entries == NULL) {
		warnx(cache_enable_str, alloc_err, "data");
		FREE(uc, struct cl_cache, 1);
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	MEMZERO(uc->uc_entries, cache_ptr, size * SPARSENESS);
	uc->uc_fifo = ALLOC(cache_ptr, size);
	if (uc->uc_fifo == NULL) {
		warnx(cache_enable_str, alloc_err, "fifo");
		FREE(uc->uc_entries, cache_ptr, size * SPARSENESS);
		FREE(uc, struct cl_cache, 1);
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	MEMZERO(uc->uc_fifo, cache_ptr, size);
	su->su_cache = (char *)(void *)uc;
	mutex_unlock(&dupreq_lock);
	return (1);
}

/*
 * Set an entry in the cache. It assumes that the uc entry is set from
 * the earlier call to cache_get() for the same procedure. This will always
 * happen because cache_get() is called by svc_dg_recv() and cache_set() is
 * called by svc_dg_reply(). All this hoopla because the right RPC parameters
 * are not available at svc_dg_reply() time.
 */

static const char cache_set_str[] = "cache_set: %s";
static const char cache_set_err1[] = "victim not found";
static const char cache_set_err2[] = "victim alloc failed";
static const char cache_set_err3[] = "could not allocate new rpc buffer";

static void
cache_set(SVCXPRT *xprt, size_t replylen)
{
	cache_ptr victim;
	cache_ptr *vicp;
	struct svc_dg_data *su = su_data(xprt);
	struct cl_cache *uc = (struct cl_cache *) su->su_cache;
	u_int loc;
	char *newbuf;
#ifdef RPC_CACHE_DEBUG
	struct netconfig *nconf;
	char *uaddr;
#endif

	mutex_lock(&dupreq_lock);
	/*
	 * Find space for the new entry, either by
	 * reusing an old entry, or by mallocing a new one
	 */
	victim = uc->uc_fifo[uc->uc_nextvictim];
	if (victim != NULL) {
		loc = CACHE_LOC(xprt, victim->cache_xid);
		for (vicp = &uc->uc_entries[loc];
		    *vicp != NULL && *vicp != victim;
		    vicp = &(*vicp)->cache_next)
			;
		if (*vicp == NULL) {
			warnx(cache_set_str, cache_set_err1);
			mutex_unlock(&dupreq_lock);
			return;
		}
		*vicp = victim->cache_next;	/* remove from cache */
		newbuf = victim->cache_reply;
	} else {
		victim = ALLOC(struct cache_node, 1);
		if (victim == NULL) {
			warnx(cache_set_str, cache_set_err2);
			mutex_unlock(&dupreq_lock);
			return;
		}
		newbuf = mem_alloc(su->su_iosz);
		if (newbuf == NULL) {
			warnx(cache_set_str, cache_set_err3);
			FREE(victim, struct cache_node, 1);
			mutex_unlock(&dupreq_lock);
			return;
		}
	}

	/*
	 * Store it away
	 */
#ifdef RPC_CACHE_DEBUG
	if (nconf = getnetconfigent(xprt->xp_netid)) {
		uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
		freenetconfigent(nconf);
		printf(
	"cache set for xid= %x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
		    su->su_xid, uc->uc_prog, uc->uc_vers,
		    uc->uc_proc, uaddr);
		free(uaddr);
	}
#endif
	victim->cache_replylen = replylen;
	victim->cache_reply = rpc_buffer(xprt);
	rpc_buffer(xprt) = newbuf;
	xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt),
	    su->su_iosz, XDR_ENCODE);
	victim->cache_xid = su->su_xid;
	victim->cache_proc = uc->uc_proc;
	victim->cache_vers = uc->uc_vers;
	victim->cache_prog = uc->uc_prog;
	victim->cache_addr = xprt->xp_rtaddr;
	victim->cache_addr.buf = ALLOC(char, xprt->xp_rtaddr.len);
	(void) memcpy(victim->cache_addr.buf, xprt->xp_rtaddr.buf,
	    (size_t)xprt->xp_rtaddr.len);
	loc = CACHE_LOC(xprt, victim->cache_xid);
	victim->cache_next = uc->uc_entries[loc];
	uc->uc_entries[loc] = victim;
	uc->uc_fifo[uc->uc_nextvictim++] = victim;
	uc->uc_nextvictim %= uc->uc_size;
	mutex_unlock(&dupreq_lock);
}

/*
 * Try to get an entry from the cache.
 * Return 1 if found; return 0 if not found, and set the stage for cache_set().
 */
static int
cache_get(SVCXPRT *xprt, struct rpc_msg *msg, char **replyp, size_t *replylenp)
{
	u_int loc;
	cache_ptr ent;
	struct svc_dg_data *su = su_data(xprt);
	struct cl_cache *uc = (struct cl_cache *) su->su_cache;
#ifdef RPC_CACHE_DEBUG
	struct netconfig *nconf;
	char *uaddr;
#endif

	mutex_lock(&dupreq_lock);
	loc = CACHE_LOC(xprt, su->su_xid);
	for (ent = uc->uc_entries[loc]; ent != NULL; ent = ent->cache_next) {
		if (ent->cache_xid == su->su_xid &&
		    ent->cache_proc == msg->rm_call.cb_proc &&
		    ent->cache_vers == msg->rm_call.cb_vers &&
		    ent->cache_prog == msg->rm_call.cb_prog &&
		    ent->cache_addr.len == xprt->xp_rtaddr.len &&
		    (memcmp(ent->cache_addr.buf, xprt->xp_rtaddr.buf,
			xprt->xp_rtaddr.len) == 0)) {
#ifdef RPC_CACHE_DEBUG
			if (nconf = getnetconfigent(xprt->xp_netid)) {
				uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
				freenetconfigent(nconf);
				printf(
	"cache entry found for xid=%x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
				    su->su_xid, msg->rm_call.cb_prog,
				    msg->rm_call.cb_vers,
				    msg->rm_call.cb_proc, uaddr);
				free(uaddr);
			}
#endif
			*replyp = ent->cache_reply;
			*replylenp = ent->cache_replylen;
			mutex_unlock(&dupreq_lock);
			return (1);
		}
	}
	/*
	 * Failed to find entry
	 * Remember a few things so we can do a set later
	 */
	uc->uc_proc = msg->rm_call.cb_proc;
	uc->uc_vers = msg->rm_call.cb_vers;
	uc->uc_prog = msg->rm_call.cb_prog;
	mutex_unlock(&dupreq_lock);
	return (0);
}