Bug Summary

File: rx/rx_rdwr.c
Location: line 115, column 4
Description: Access to field 'prev' results in a dereference of a null pointer
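
The defect is reported inside the expansion of the 'queue_Remove' macro at line 115. As a minimal sketch (the struct layout and macro body below are reconstructed from the expansion text preserved in the listing; the names rx_queue, prev, and next come from the report, everything else is illustrative):

    struct rx_queue {
        struct rx_queue *prev;
        struct rx_queue *next;
    };

    /* Reconstructed from the expansion at line 115: unlink q from its
     * ring, then zero q->next.  The inner assignment evaluates to
     * q->next, and that value becomes the target of the ->prev store. */
    #define queue_Remove(q) \
        (((((struct rx_queue *)(q))->prev->next = ((struct rx_queue *)(q))->next) \
             ->prev = ((struct rx_queue *)(q))->prev), \
         ((struct rx_queue *)(q))->next = 0)

Since queue_Remove leaves the removed element with next == 0, removing the same element a second time makes that null pointer the target of the ->prev store; this is the dereference flagged at step [33] in the path below. Whether the path the analyzer constructs (looping back into the receive code and pulling an already-removed packet off call->rq while call->lock is held) is actually reachable is not established by the report itself.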

Annotated Source Code

1 /*
2 * Copyright 2000, International Business Machines Corporation and others.
3 * All Rights Reserved.
4 *
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
8 */
9
10 #include <afsconfig.h>
11 #include <afs/param.h>
12
13 #ifdef KERNEL
14 # ifndef UKERNEL
15 # ifdef RX_KERNEL_TRACE
16 # include "rx_kcommon.h"
17 # endif
18 # if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
19 # include "afs/sysincludes.h"
20 # else
21 # include "h/types.h"
22 # include "h/time.h"
23 # include "h/stat.h"
24 # if defined(AFS_AIX_ENV) || defined(AFS_AUX_ENV) || defined(AFS_SUN5_ENV)
25 # include "h/systm.h"
26 # endif
27 # ifdef AFS_OSF_ENV
28 # include <net/net_globals.h>
29 # endif /* AFS_OSF_ENV */
30 # ifdef AFS_LINUX20_ENV
31 # include "h/socket.h"
32 # endif
33 # include "netinet/in.h"
34 # if defined(AFS_SGI_ENV)
35 # include "afs/sysincludes.h"
36 # endif
37 # endif
38 # include "afs/afs_args.h"
39 # if (defined(AFS_AUX_ENV) || defined(AFS_AIX_ENV))
40 # include "h/systm.h"
41 # endif
42 # else /* !UKERNEL */
43 # include "afs/sysincludes.h"
44 # endif /* !UKERNEL */
45
46 # ifdef RXDEBUG
47 # undef RXDEBUG /* turn off debugging */
48 # endif /* RXDEBUG */
49
50 # include "afs/afs_osi.h"
51 # include "rx_kmutex.h"
52 # include "rx/rx_kernel.h"
53 # include "afs/lock.h"
54 #else /* KERNEL */
55 # include <roken.h>
56 #endif /* KERNEL */
57
58 #include "rx.h"
59 #include "rx_clock.h"
60 #include "rx_queue.h"
61 #include "rx_globals.h"
62
63 #ifdef RX_LOCKS_DB
64 /* rxdb_fileID is used to identify the lock location, along with line#. */
65 static int rxdb_fileID = RXDB_FILE_RX_RDWR;
66 #endif /* RX_LOCKS_DB */
67 /* rxi_ReadProc -- internal version.
68 *
69 * LOCKS USED -- called at netpri
70 */
71 int
72 rxi_ReadProc(struct rx_call *call, char *buf,
73 int nbytes)
74 {
75 struct rx_packet *cp = call->currentPacket;
76 struct rx_packet *rp;
77 int requestCount;
78 unsigned int t;
79
80 /* XXXX took out clock_NewTime from here. Was it needed? */
81 requestCount = nbytes;
82
83 /* Free any packets from the last call to ReadvProc/WritevProc */
84 if (queue_IsNotEmpty(&call->iovq)) {
  [1] Taking false branch
85 #ifdef RXDEBUG_PACKET
86 call->iovqc -=
87 #endif /* RXDEBUG_PACKET */
88 rxi_FreePackets(0, &call->iovq);
89 }
90
91 do {
  [7] Loop condition is true. Execution continues on line 92
  [27] Loop condition is true. Execution continues on line 92
92 if (call->nLeft == 0) {
  [2] Taking false branch
  [8] Taking true branch
  [28] Taking true branch
93 /* Get next packet */
94 MUTEX_ENTER(&call->lock);
95 for (;;) {
  [9] Loop condition is true. Entering loop body
  [14] Loop condition is true. Entering loop body
  [19] Loop condition is true. Entering loop body
  [29] Loop condition is true. Entering loop body
96 if (call->error || (call->mode != RX_MODE_RECEIVING)) {
  [10] Taking false branch
  [15] Taking false branch
  [20] Taking false branch
  [30] Taking false branch
97 if (call->error) {
98 call->mode = RX_MODE_ERROR;
99 MUTEX_EXIT(&call->lock);
100 return 0;
101 }
102 if (call->mode == RX_MODE_SENDING) {
103 MUTEX_EXIT(&call->lock);
104 rxi_FlushWrite(call);
105 MUTEX_ENTER(&call->lock);
106 continue;
107 }
108 }
109 if (queue_IsNotEmpty(&call->rq)) {
  [11] Taking false branch
  [16] Taking false branch
  [21] Taking true branch
  [31] Taking true branch
110 /* Check that next packet available is next in sequence */
111 rp = queue_First(&call->rq, rx_packet);
112 if (rp->header.seq == call->rnext) {
  [22] Taking true branch
  [32] Taking true branch
113 afs_int32 error;
114 struct rx_connection *conn = call->conn;
115 queue_Remove(rp);
  [33] Within the expansion of the macro 'queue_Remove':
    (a) Access to field 'prev' results in a dereference of a null pointer
116 #ifdef RX_TRACK_PACKETS
117 rp->flags &= ~RX_PKTFLAG_RQ;
118 #endif
119 #ifdef RXDEBUG_PACKET
120 call->rqc--;
121 #endif /* RXDEBUG_PACKET */
122
123 /* RXS_CheckPacket called to undo RXS_PreparePacket's
124 * work. It may reduce the length of the packet by up
125 * to conn->maxTrailerSize, to reflect the length of the
126 * data + the header. */
127 if ((error =
  [23] Taking false branch
128 RXS_CheckPacket(conn->securityObject, call,
129 rp))) {
130 /* Used to merely shut down the call, but now we
131 * shut down the whole connection since this may
132 * indicate an attempt to hijack it */
133
134 MUTEX_EXIT(&call->lock);
135 rxi_ConnectionError(conn, error);
136 MUTEX_ENTER(&conn->conn_data_lock);
137 rp = rxi_SendConnectionAbort(conn, rp, 0, 0);
138 MUTEX_EXIT(&conn->conn_data_lock);
139 rxi_FreePacket(rp);
140
141 return 0;
142 }
143 call->rnext++;
144 cp = call->currentPacket = rp;
145 #ifdef RX_TRACK_PACKETS
146 call->currentPacket->flags |= RX_PKTFLAG_CP;
147 #endif
148 call->curvec = 1; /* 0th vec is always header */
149 /* begin at the beginning [ more or less ], continue
150 * on until the end, then stop. */
151 call->curpos =
152 (char *)cp->wirevec[1].iov_base +
153 call->conn->securityHeaderSize;
154 call->curlen =
155 cp->wirevec[1].iov_len -
156 call->conn->securityHeaderSize;
157
158 /* Notice that this code works correctly if the data
159 * size is 0 (which it may be--no reply arguments from
160 * server, for example). This relies heavily on the
161 * fact that the code below immediately frees the packet
162 * (no yields, etc.). If it didn't, this would be a
163 * problem because a value of zero for call->nLeft
164 * normally means that there is no read packet */
165 call->nLeft = cp->length;
166 hadd32(call->bytesRcvd, cp->length);
167
168 /* Send a hard ack for every rxi_HardAckRate+1 packets
169 * consumed. Otherwise schedule an event to send
170 * the hard ack later on.
171 */
172 call->nHardAcks++;
173 if (!(call->flags & RX_CALL_RECEIVE_DONE)) {
  [24] Taking false branch
174 if (call->nHardAcks > (u_short) rxi_HardAckRate) {
175 rxevent_Cancel(call->delayedAckEvent, call,
176 RX_CALL_REFCOUNT_DELAY);
177 rxi_SendAck(call, 0, 0, RX_ACK_DELAY, 0);
178 } else {
179 struct clock when, now;
180 clock_GetTime(&now);
181 when = now;
182 /* Delay to consolidate ack packets */
183 clock_Add(&when, &rx_hardAckDelay);
184 if (!call->delayedAckEvent
185 || clock_Gt(&call->delayedAckEvent->
186 eventTime, &when)) {
187 rxevent_Cancel(call->delayedAckEvent,
188 call,
189 RX_CALL_REFCOUNT_DELAY);
190 MUTEX_ENTER(&rx_refcnt_mutex);
191 CALL_HOLD(call, RX_CALL_REFCOUNT_DELAY);
192 MUTEX_EXIT(&rx_refcnt_mutex);
193 call->delayedAckEvent =
194 rxevent_PostNow(&when, &now,
195 rxi_SendDelayedAck, call,
196 0);
197 }
198 }
199 }
200 break;
  [25] Execution continues on line 283
201 }
202 }
203
204 /*
205 * If we reach this point either we have no packets in the
206 * receive queue or the next packet in the queue is not the
207 * one we are looking for. There is nothing else for us to
208 * do but wait for another packet to arrive.
209 */
210
211 /* Are there ever going to be any more packets? */
212 if (call->flags & RX_CALL_RECEIVE_DONE) {
  [12] Taking false branch
  [17] Taking false branch
213 MUTEX_EXIT(&call->lock);
214 return requestCount - nbytes;
215 }
216 /* Wait for in-sequence packet */
217 call->flags |= RX_CALL_READER_WAIT;
218 clock_NewTime();
219 call->startWait = clock_Sec();
220 while (call->flags & RX_CALL_READER_WAIT) {
  [13] Loop condition is false. Execution continues on line 227
  [18] Loop condition is false. Execution continues on line 227
221 #ifdef RX_ENABLE_LOCKS
222 CV_WAIT(&call->cv_rq, &call->lock);
223 #else
224 osi_rxSleep(&call->rq);
225 #endif
226 }
227 cp = call->currentPacket;
228
229 call->startWait = 0;
230 #ifdef RX_ENABLE_LOCKS
231 if (call->error) {
232 MUTEX_EXIT(&call->lock);
233 return 0;
234 }
235 #endif /* RX_ENABLE_LOCKS */
236 }
237 MUTEX_EXIT(&call->lock);
238 } else
239 /* osi_Assert(cp); */
240 /* MTUXXX this should be replaced by some error-recovery code before shipping */
241 /* yes, the following block is allowed to be the ELSE clause (or not) */
242 /* It's possible for call->nLeft to be smaller than any particular
243 * iov_len. Usually, recvmsg doesn't change the iov_len, since it
244 * reflects the size of the buffer. We have to keep track of the
245 * number of bytes read in the length field of the packet struct. On
246 * the final portion of a received packet, it's almost certain that
247 * call->nLeft will be smaller than the final buffer. */
248 while (nbytes && cp) {
  [3] Loop condition is true. Entering loop body
  [5] Loop condition is false. Execution continues on line 283
249 t = MIN((int)call->curlen, nbytes);
250 t = MIN(t, (int)call->nLeft);
251 memcpy(buf, call->curpos, t);
252 buf += t;
253 nbytes -= t;
254 call->curpos += t;
255 call->curlen -= t;
256 call->nLeft -= t;
257
258 if (!call->nLeft) {
  [4] Taking true branch
259 /* out of packet. Get another one. */
260 #ifdef RX_TRACK_PACKETS
261 call->currentPacket->flags &= ~RX_PKTFLAG_CP;
262 #endif
263 rxi_FreePacket(cp);
264 cp = call->currentPacket = (struct rx_packet *)0;
265 } else if (!call->curlen) {
266 /* need to get another struct iov */
267 if (++call->curvec >= cp->niovecs) {
268 /* current packet is exhausted, get ready for another */
269 /* don't worry about curvec and stuff, they get set somewhere else */
270 #ifdef RX_TRACK_PACKETS
271 call->currentPacket->flags &= ~RX_PKTFLAG_CP;
272 #endif
273 rxi_FreePacket(cp);
274 cp = call->currentPacket = (struct rx_packet *)0;
275 call->nLeft = 0;
276 } else {
277 call->curpos =
278 (char *)cp->wirevec[call->curvec].iov_base;
279 call->curlen = cp->wirevec[call->curvec].iov_len;
280 }
281 }
282 }
283 if (!nbytes) {
  [6] Taking false branch
  [26] Taking false branch
284 /* user buffer is full, return */
285 return requestCount;
286 }
287
288 } while (nbytes);
289
290 return requestCount;
291 }
292
293 int
294 rx_ReadProc(struct rx_call *call, char *buf, int nbytes)
295 {
296 int bytes;
297 int tcurlen;
298 int tnLeft;
299 char *tcurpos;
300 SPLVAR;
301
302 /* Free any packets from the last call to ReadvProc/WritevProc */
303 if (!queue_IsEmpty(&call->iovq)) {
304 #ifdef RXDEBUG_PACKET
305 call->iovqc -=
306 #endif /* RXDEBUG_PACKET */
307 rxi_FreePackets(0, &call->iovq);
308 }
309
310 /*
311 * Most common case, all of the data is in the current iovec.
312 * We are relying on nLeft being zero unless the call is in receive mode.
313 */
314 tcurlen = call->curlen;
315 tnLeft = call->nLeft;
316 if (!call->error && tcurlen > nbytes && tnLeft > nbytes) {
317 tcurpos = call->curpos;
318 memcpy(buf, tcurpos, nbytes);
319
320 call->curpos = tcurpos + nbytes;
321 call->curlen = tcurlen - nbytes;
322 call->nLeft = tnLeft - nbytes;
323
324 if (!call->nLeft && call->currentPacket != NULL) {
325 /* out of packet. Get another one. */
326 rxi_FreePacket(call->currentPacket);
327 call->currentPacket = (struct rx_packet *)0;
328 }
329 return nbytes;
330 }
331
332 NETPRI;
333 bytes = rxi_ReadProc(call, buf, nbytes);
334 USERPRI;
335 return bytes;
336 }
337
338 /* Optimization for unmarshalling 32 bit integers */
339 int
340 rx_ReadProc32(struct rx_call *call, afs_int32 * value)
341 {
342 int bytes;
343 int tcurlen;
344 int tnLeft;
345 char *tcurpos;
346 SPLVAR;
347
348 /* Free any packets from the last call to ReadvProc/WritevProc */
349 if (!queue_IsEmpty(&call->iovq)) {
350 #ifdef RXDEBUG_PACKET
351 call->iovqc -=
352 #endif /* RXDEBUG_PACKET */
353 rxi_FreePackets(0, &call->iovq);
354 }
355
356 /*
357 * Most common case, all of the data is in the current iovec.
358 * We are relying on nLeft being zero unless the call is in receive mode.
359 */
360 tcurlen = call->curlen;
361 tnLeft = call->nLeft;
362 if (!call->error && tcurlen >= sizeof(afs_int32)
363 && tnLeft >= sizeof(afs_int32)) {
364 tcurpos = call->curpos;
365
366 memcpy((char *)value, tcurpos, sizeof(afs_int32));
367
368 call->curpos = tcurpos + sizeof(afs_int32);
369 call->curlen = (u_short)(tcurlen - sizeof(afs_int32));
370 call->nLeft = (u_short)(tnLeft - sizeof(afs_int32));
371 if (!call->nLeft && call->currentPacket != NULL) {
372 /* out of packet. Get another one. */
373 rxi_FreePacket(call->currentPacket);
374 call->currentPacket = (struct rx_packet *)0;
375 }
376 return sizeof(afs_int32);
377 }
378
379 NETPRI;
380 bytes = rxi_ReadProc(call, (char *)value, sizeof(afs_int32));
381 USERPRI;
382
383 return bytes;
384 }
385
386 /* rxi_FillReadVec
387 *
388 * Uses packets in the receive queue to fill in as much of the
389 * current iovec as possible. Does not block if it runs out
390 * of packets to complete the iovec. Return true if an ack packet
391 * was sent, otherwise return false */
392 int
393 rxi_FillReadVec(struct rx_call *call, afs_uint32 serial)
394 {
395 int didConsume = 0;
396 int didHardAck = 0;
397 unsigned int t;
398 struct rx_packet *rp;
399 struct rx_packet *curp;
400 struct iovec *call_iov;
401 struct iovec *cur_iov = NULL;
402
403 curp = call->currentPacket;
404 if (curp) {
405 cur_iov = &curp->wirevec[call->curvec];
406 }
407 call_iov = &call->iov[call->iovNext];
408
409 while (!call->error && call->iovNBytes && call->iovNext < call->iovMax) {
410 if (call->nLeft == 0) {
411 /* Get next packet */
412 if (queue_IsNotEmpty(&call->rq)) {
413 /* Check that next packet available is next in sequence */
414 rp = queue_First(&call->rq, rx_packet);
415 if (rp->header.seq == call->rnext) {
416 afs_int32 error;
417 struct rx_connection *conn = call->conn;
418 queue_Remove(rp);
419 #ifdef RX_TRACK_PACKETS
420 rp->flags &= ~RX_PKTFLAG_RQ;
421 #endif
422 #ifdef RXDEBUG_PACKET
423 call->rqc--;
424 #endif /* RXDEBUG_PACKET */
425
426 /* RXS_CheckPacket called to undo RXS_PreparePacket's
427 * work. It may reduce the length of the packet by up
428 * to conn->maxTrailerSize, to reflect the length of the
429 * data + the header. */
430 if ((error =
431 RXS_CheckPacket(conn->securityObject, call, rp))) {
432 /* Used to merely shut down the call, but now we
433 * shut down the whole connection since this may
434 * indicate an attempt to hijack it */
435
436 MUTEX_EXIT(&call->lock);
437 rxi_ConnectionError(conn, error);
438 MUTEX_ENTER(&conn->conn_data_lock);
439 rp = rxi_SendConnectionAbort(conn, rp, 0, 0);
440 MUTEX_EXIT(&conn->conn_data_lock);
441 rxi_FreePacket(rp);
442 MUTEX_ENTER(&call->lock);
443
444 return 1;
445 }
446 call->rnext++;
447 curp = call->currentPacket = rp;
448 #ifdef RX_TRACK_PACKETS
449 call->currentPacket->flags |= RX_PKTFLAG_CP;
450 #endif
451 call->curvec = 1; /* 0th vec is always header */
452 cur_iov = &curp->wirevec[1];
453 /* begin at the beginning [ more or less ], continue
454 * on until the end, then stop. */
455 call->curpos =
456 (char *)curp->wirevec[1].iov_base +
457 call->conn->securityHeaderSize;
458 call->curlen =
459 curp->wirevec[1].iov_len -
460 call->conn->securityHeaderSize;
461
462 /* Notice that this code works correctly if the data
463 * size is 0 (which it may be--no reply arguments from
464 * server, for example). This relies heavily on the
465 * fact that the code below immediately frees the packet
466 * (no yields, etc.). If it didn't, this would be a
467 * problem because a value of zero for call->nLeft
468 * normally means that there is no read packet */
469 call->nLeft = curp->length;
470 hadd32(call->bytesRcvd, curp->length);
471
472 /* Send a hard ack for every rxi_HardAckRate+1 packets
473 * consumed. Otherwise schedule an event to send
474 * the hard ack later on.
475 */
476 call->nHardAcks++;
477 didConsume = 1;
478 continue;
479 }
480 }
481 break;
482 }
483
484 /* It's possible for call->nLeft to be smaller than any particular
485 * iov_len. Usually, recvmsg doesn't change the iov_len, since it
486 * reflects the size of the buffer. We have to keep track of the
487 * number of bytes read in the length field of the packet struct. On
488 * the final portion of a received packet, it's almost certain that
489 * call->nLeft will be smaller than the final buffer. */
490 while (call->iovNBytes && call->iovNext < call->iovMax && curp) {
491
492 t = MIN((int)call->curlen, call->iovNBytes);
493 t = MIN(t, (int)call->nLeft);
494 call_iov->iov_base = call->curpos;
495 call_iov->iov_len = t;
496 call_iov++;
497 call->iovNext++;
498 call->iovNBytes -= t;
499 call->curpos += t;
500 call->curlen -= t;
501 call->nLeft -= t;
502
503 if (!call->nLeft) {
504 /* out of packet. Get another one. */
505 #ifdef RX_TRACK_PACKETS
506 curp->flags &= ~RX_PKTFLAG_CP;
507 curp->flags |= RX_PKTFLAG_IOVQ;
508 #endif
509 queue_Append(&call->iovq, curp);
510 #ifdef RXDEBUG_PACKET
511 call->iovqc++;
512 #endif /* RXDEBUG_PACKET */
513 curp = call->currentPacket = (struct rx_packet *)0;
514 } else if (!call->curlen) {
515 /* need to get another struct iov */
516 if (++call->curvec >= curp->niovecs) {
517 /* current packet is exhausted, get ready for another */
518 /* don't worry about curvec and stuff, they get set somewhere else */
519 #ifdef RX_TRACK_PACKETS
520 curp->flags &= ~RX_PKTFLAG_CP;
521 curp->flags |= RX_PKTFLAG_IOVQ;
522 #endif
523 queue_Append(&call->iovq, curp);
524 #ifdef RXDEBUG_PACKET
525 call->iovqc++;
526 #endif /* RXDEBUG_PACKET */
527 curp = call->currentPacket = (struct rx_packet *)0;
528 call->nLeft = 0;
529 } else {
530 cur_iov++;
531 call->curpos = (char *)cur_iov->iov_base;
532 call->curlen = cur_iov->iov_len;
533 }
534 }
535 }
536 }
537
538 /* If we consumed any packets then check whether we need to
539 * send a hard ack. */
540 if (didConsume && (!(call->flags & RX_CALL_RECEIVE_DONE))) {
541 if (call->nHardAcks > (u_short) rxi_HardAckRate) {
542 rxevent_Cancel(call->delayedAckEvent, call,
543 RX_CALL_REFCOUNT_DELAY);
544 rxi_SendAck(call, 0, serial, RX_ACK_DELAY, 0);
545 didHardAck = 1;
546 } else {
547 struct clock when, now;
548 clock_GetTime(&now);
549 when = now;
550 /* Delay to consolidate ack packets */
551 clock_Add(&when, &rx_hardAckDelay);
552 if (!call->delayedAckEvent
553 || clock_Gt(&call->delayedAckEvent->eventTime, &when)) {
554 rxevent_Cancel(call->delayedAckEvent, call,
555 RX_CALL_REFCOUNT_DELAY);
556 MUTEX_ENTER(&rx_refcnt_mutex);
557 CALL_HOLD(call, RX_CALL_REFCOUNT_DELAY);
558 MUTEX_EXIT(&rx_refcnt_mutex);
559 call->delayedAckEvent =
560 rxevent_PostNow(&when, &now, rxi_SendDelayedAck, call, 0);
561 }
562 }
563 }
564 return didHardAck;
565 }
566
567
568 /* rxi_ReadvProc -- internal version.
569 *
570 * Fills in an iovec with pointers to the packet buffers. All packets
571 * except the last packet (new current packet) are moved to the iovq
572 * while the application is processing the data.
573 *
574 * LOCKS USED -- called at netpri.
575 */
576 int
577 rxi_ReadvProc(struct rx_call *call, struct iovec *iov, int *nio, int maxio,
578 int nbytes)
579 {
580 int bytes;
581
582 /* Free any packets from the last call to ReadvProc/WritevProc */
583 if (queue_IsNotEmpty(&call->iovq)) {
584 #ifdef RXDEBUG_PACKET
585 call->iovqc -=
586 #endif /* RXDEBUG_PACKET */
587 rxi_FreePackets(0, &call->iovq);
588 }
589
590 if (call->mode == RX_MODE_SENDING) {
591 rxi_FlushWrite(call);
592 }
593
594 MUTEX_ENTER(&call->lock);
595 if (call->error)
596 goto error;
597
598 /* Get whatever data is currently available in the receive queue.
599 * If rxi_FillReadVec sends an ack packet then it is possible
600 * that we will receive more data while we drop the call lock
601 * to send the packet. Set the RX_CALL_IOVEC_WAIT flag
602 * here to avoid a race with the receive thread if we send
603 * hard acks in rxi_FillReadVec. */
604 call->flags |= RX_CALL_IOVEC_WAIT;
605 call->iovNBytes = nbytes;
606 call->iovMax = maxio;
607 call->iovNext = 0;
608 call->iov = iov;
609 rxi_FillReadVec(call, 0);
610
611 /* if we need more data then sleep until the receive thread has
612 * filled in the rest. */
613 if (!call->error && call->iovNBytes && call->iovNext < call->iovMax
614 && !(call->flags & RX_CALL_RECEIVE_DONE)) {
615 call->flags |= RX_CALL_READER_WAIT;
616 clock_NewTime();
617 call->startWait = clock_Sec();
618 while (call->flags & RX_CALL_READER_WAIT) {
619 #ifdef RX_ENABLE_LOCKS
620 CV_WAIT(&call->cv_rq, &call->lock);
621 #else
622 osi_rxSleep(&call->rq);
623 #endif
624 }
625 call->startWait = 0;
626 }
627 call->flags &= ~RX_CALL_IOVEC_WAIT;
628
629 if (call->error)
630 goto error;
631
632 call->iov = NULL;
633 *nio = call->iovNext;
634 bytes = nbytes - call->iovNBytes;
635 MUTEX_EXIT(&call->lock);
636 return bytes;
637
638 error:
639 MUTEX_EXIT(&call->lock);
640 call->mode = RX_MODE_ERROR;
641 return 0;
642 }
643
644 int
645 rx_ReadvProc(struct rx_call *call, struct iovec *iov, int *nio, int maxio,
646 int nbytes)
647 {
648 int bytes;
649 SPLVAR;
650
651 NETPRI;
652 bytes = rxi_ReadvProc(call, iov, nio, maxio, nbytes);
653 USERPRI;
654 return bytes;
655 }
656
657 /* rxi_WriteProc -- internal version.
658 *
659 * LOCKS USED -- called at netpri
660 */
661
662 int
663 rxi_WriteProc(struct rx_call *call, char *buf,
664 int nbytes)
665 {
666 struct rx_connection *conn = call->conn;
667 struct rx_packet *cp = call->currentPacket;
668 unsigned int t;
669 int requestCount = nbytes;
670
671 /* Free any packets from the last call to ReadvProc/WritevProc */
672 if (queue_IsNotEmpty(&call->iovq)) {
673 #ifdef RXDEBUG_PACKET
674 call->iovqc -=
675 #endif /* RXDEBUG_PACKET */
676 rxi_FreePackets(0, &call->iovq);
677 }
678
679 if (call->mode != RX_MODE_SENDING) {
680 if ((conn->type == RX_SERVER_CONNECTION)
681 && (call->mode == RX_MODE_RECEIVING)) {
682 call->mode = RX_MODE_SENDING;
683 if (cp) {
684 #ifdef RX_TRACK_PACKETS
685 cp->flags &= ~RX_PKTFLAG_CP;
686 #endif
687 rxi_FreePacket(cp);
688 cp = call->currentPacket = (struct rx_packet *)0;
689 call->nLeft = 0;
690 call->nFree = 0;
691 }
692 } else {
693 return 0;
694 }
695 }
696
697 /* Loop condition is checked at end, so that a write of 0 bytes
698 * will force a packet to be created--specially for the case where
699 * there are 0 bytes on the stream, but we must send a packet
700 * anyway. */
701 do {
702 if (call->nFree == 0) {
703 MUTEX_ENTER(&call->lock);
704 cp = call->currentPacket;
705 if (call->error)
706 call->mode = RX_MODE_ERROR;
707 if (!call->error && cp) {
708 /* Clear the current packet now so that if
709 * we are forced to wait and drop the lock
710 * the packet we are planning on using
711 * cannot be freed.
712 */
713 #ifdef RX_TRACK_PACKETS
714 cp->flags &= ~RX_PKTFLAG_CP;
715 #endif
716 call->currentPacket = (struct rx_packet *)0;
717 clock_NewTime(); /* Bogus: need new time package */
718 /* The 0, below, specifies that it is not the last packet:
719 * there will be others. PrepareSendPacket may
720 * alter the packet length by up to
721 * conn->securityMaxTrailerSize */
722 hadd32(call->bytesSent, cp->length);
723 rxi_PrepareSendPacket(call, cp, 0);
724 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
725 /* PrepareSendPacket drops the call lock */
726 rxi_WaitforTQBusy(call);
727 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
728 #ifdef RX_TRACK_PACKETS
729 cp->flags |= RX_PKTFLAG_TQ;
730 #endif
731 queue_Append(&call->tq, cp);
732 #ifdef RXDEBUG_PACKET
733 call->tqc++;
734 #endif /* RXDEBUG_PACKET */
735 cp = (struct rx_packet *)0;
736 /* If the call is in recovery, let it exhaust its current
737 * retransmit queue before forcing it to send new packets
738 */
739 if (!(call->flags & (RX_CALL_FAST_RECOVER))) {
740 rxi_Start(call, 0);
741 }
742 } else if (cp) {
743 #ifdef RX_TRACK_PACKETS
744 cp->flags &= ~RX_PKTFLAG_CP;
745 #endif
746 rxi_FreePacket(cp);
747 cp = call->currentPacket = (struct rx_packet *)0;
748 }
749 /* Wait for transmit window to open up */
750 while (!call->error
751 && call->tnext + 1 > call->tfirst + (2 * call->twind)) {
752 clock_NewTime();
753 call->startWait = clock_Sec();
754
755 #ifdef RX_ENABLE_LOCKS
756 CV_WAIT(&call->cv_twind, &call->lock);
757 #else
758 call->flags |= RX_CALL_WAIT_WINDOW_ALLOC;
759 osi_rxSleep(&call->twind);
760 #endif
761
762 call->startWait = 0;
763 #ifdef RX_ENABLE_LOCKS
764 if (call->error) {
765 call->mode = RX_MODE_ERROR;
766 MUTEX_EXIT(&call->lock);
767 return 0;
768 }
769 #endif /* RX_ENABLE_LOCKS */
770 }
771 if ((cp = rxi_AllocSendPacket(call, nbytes))) {
772 #ifdef RX_TRACK_PACKETS
773 cp->flags |= RX_PKTFLAG_CP;
774 #endif
775 call->currentPacket = cp;
776 call->nFree = cp->length;
777 call->curvec = 1; /* 0th vec is always header */
778 /* begin at the beginning [ more or less ], continue
779 * on until the end, then stop. */
780 call->curpos =
781 (char *)cp->wirevec[1].iov_base +
782 call->conn->securityHeaderSize;
783 call->curlen =
784 cp->wirevec[1].iov_len - call->conn->securityHeaderSize;
785 }
786 if (call->error) {
787 call->mode = RX_MODE_ERROR;
788 if (cp) {
789 #ifdef RX_TRACK_PACKETS
790 cp->flags &= ~RX_PKTFLAG_CP;
791 #endif
792 rxi_FreePacket(cp);
793 call->currentPacket = NULL;
794 }
795 MUTEX_EXIT(&call->lock);
796 return 0;
797 }
798 MUTEX_EXIT(&call->lock);
799 }
800
801 if (cp && (int)call->nFree < nbytes) {
802 /* Try to extend the current buffer */
803 int len, mud;
804 len = cp->length;
805 mud = rx_MaxUserDataSize(call);
806 if (mud > len) {
807 int want;
808 want = MIN(nbytes - (int)call->nFree, mud - len);
809 rxi_AllocDataBuf(cp, want, RX_PACKET_CLASS_SEND_CBUF);
810 if (cp->length > (unsigned)mud)
811 cp->length = mud;
812 call->nFree += (cp->length - len);
813 }
814 }
815
816 /* If the remaining bytes fit in the buffer, then store them
817 * and return. Don't ship a buffer that's full immediately to
818 * the peer--we don't know if it's the last buffer yet */
819
820 if (!cp) {
821 call->nFree = 0;
822 }
823
824 while (nbytes && call->nFree) {
825
826 t = MIN((int)call->curlen, nbytes);
827 t = MIN((int)call->nFree, t);
828 memcpy(call->curpos, buf, t);
829 buf += t;
830 nbytes -= t;
831 call->curpos += t;
832 call->curlen -= (u_short)t;
833 call->nFree -= (u_short)t;
834
835 if (!call->curlen) {
836 /* need to get another struct iov */
837 if (++call->curvec >= cp->niovecs) {
838 /* current packet is full, extend or send it */
839 call->nFree = 0;
840 } else {
841 call->curpos = (char *)cp->wirevec[call->curvec].iov_base;
842 call->curlen = cp->wirevec[call->curvec].iov_len;
843 }
844 }
845 } /* while bytes to send and room to send them */
846
847 /* might be out of space now */
848 if (!nbytes) {
849 return requestCount;
850 } else; /* more data to send, so get another packet and keep going */
851 } while (nbytes);
852
853 return requestCount - nbytes;
854 }
855
856 int
857 rx_WriteProc(struct rx_call *call, char *buf, int nbytes)
858 {
859 int bytes;
860 int tcurlen;
861 int tnFree;
862 char *tcurpos;
863 SPLVAR;
864
865 /* Free any packets from the last call to ReadvProc/WritevProc */
866 if (queue_IsNotEmpty(&call->iovq)) {
867 #ifdef RXDEBUG_PACKET
868 call->iovqc -=
869 #endif /* RXDEBUG_PACKET */
870 rxi_FreePackets(0, &call->iovq);
871 }
872
873 /*
874 * Most common case: all of the data fits in the current iovec.
875 * We are relying on nFree being zero unless the call is in send mode.
876 */
877 tcurlen = (int)call->curlen;
878 tnFree = (int)call->nFree;
879 if (!call->error && tcurlen >= nbytes && tnFree >= nbytes) {
880 tcurpos = call->curpos;
881
882 memcpy(tcurpos, buf, nbytes);
883 call->curpos = tcurpos + nbytes;
884 call->curlen = (u_short)(tcurlen - nbytes);
885 call->nFree = (u_short)(tnFree - nbytes);
886 return nbytes;
887 }
888
889 NETPRI;
890 bytes = rxi_WriteProc(call, buf, nbytes);
891 USERPRI;
892 return bytes;
893 }
894
895 /* Optimization for marshalling 32 bit arguments */
896 int
897 rx_WriteProc32(struct rx_call *call, afs_int32 * value)
898 {
899 int bytes;
900 int tcurlen;
901 int tnFree;
902 char *tcurpos;
903 SPLVAR;
904
905 if (queue_IsNotEmpty(&call->iovq)) {
906 #ifdef RXDEBUG_PACKET
907 call->iovqc -=
908 #endif /* RXDEBUG_PACKET */
909 rxi_FreePackets(0, &call->iovq);
910 }
911
912 /*
913 * Most common case: all of the data fits in the current iovec.
914 * We are relying on nFree being zero unless the call is in send mode.
915 */
916 tcurlen = call->curlen;
917 tnFree = call->nFree;
918 if (!call->error && tcurlen >= sizeof(afs_int32)
919 && tnFree >= sizeof(afs_int32)) {
920 tcurpos = call->curpos;
921
922 if (!((size_t)tcurpos & (sizeof(afs_int32) - 1))) {
923 *((afs_int32 *) (tcurpos)) = *value;
924 } else {
925 memcpy(tcurpos, (char *)value, sizeof(afs_int32));
926 }
927 call->curpos = tcurpos + sizeof(afs_int32);
928 call->curlen = (u_short)(tcurlen - sizeof(afs_int32));
929 call->nFree = (u_short)(tnFree - sizeof(afs_int32));
930 return sizeof(afs_int32);
931 }
932
933 NETPRI;
934 bytes = rxi_WriteProc(call, (char *)value, sizeof(afs_int32));
935 USERPRI;
936 return bytes;
937 }
938
939 /* rxi_WritevAlloc -- internal version.
940 *
941 * Fill in an iovec to point to data in packet buffers. The application
942 * calls rxi_WritevProc when the buffers are full.
943 *
944 * LOCKS USED -- called at netpri.
945 */
946
947 static int
948 rxi_WritevAlloc(struct rx_call *call, struct iovec *iov, int *nio, int maxio,
949 int nbytes)
950 {
951 struct rx_connection *conn = call->conn;
952 struct rx_packet *cp = call->currentPacket;
953 int requestCount;
954 int nextio;
955 /* Temporary values, real work is done in rxi_WritevProc */
956 int tnFree;
957 unsigned int tcurvec;
958 char *tcurpos;
959 int tcurlen;
960
961 requestCount = nbytes;
962 nextio = 0;
963
964 /* Free any packets from the last call to ReadvProc/WritevProc */
965 if (queue_IsNotEmpty(&call->iovq)) {
966 #ifdef RXDEBUG_PACKET
967 call->iovqc -=
968 #endif /* RXDEBUG_PACKET */
969 rxi_FreePackets(0, &call->iovq);
970 }
971
972 if (call->mode != RX_MODE_SENDING) {
973 if ((conn->type == RX_SERVER_CONNECTION)
974 && (call->mode == RX_MODE_RECEIVING)) {
975 call->mode = RX_MODE_SENDING;
976 if (cp) {
977 #ifdef RX_TRACK_PACKETS
978 cp->flags &= ~RX_PKTFLAG_CP;
979 #endif
980 rxi_FreePacket(cp);
981 cp = call->currentPacket = (struct rx_packet *)0;
982 call->nLeft = 0;
983 call->nFree = 0;
984 }
985 } else {
986 return 0;
987 }
988 }
989
990 /* Set up the iovec to point to data in packet buffers. */
991 tnFree = call->nFree;
992 tcurvec = call->curvec;
993 tcurpos = call->curpos;
994 tcurlen = call->curlen;
995 do {
996 int t;
997
998 if (tnFree == 0) {
999 /* current packet is full, allocate a new one */
1000 MUTEX_ENTER(&call->lock);
1001 cp = rxi_AllocSendPacket(call, nbytes);
1002 MUTEX_EXIT(&call->lock);
1003 if (cp == NULL) {
1004 /* out of space, return what we have */
1005 *nio = nextio;
1006 return requestCount - nbytes;
1007 }
1008 #ifdef RX_TRACK_PACKETS
1009 cp->flags |= RX_PKTFLAG_IOVQ;
1010 #endif
1011 queue_Append(&call->iovq, cp);
1012 #ifdef RXDEBUG_PACKET
1013 call->iovqc++;
1014 #endif /* RXDEBUG_PACKET */
1015 tnFree = cp->length;
1016 tcurvec = 1;
1017 tcurpos =
1018 (char *)cp->wirevec[1].iov_base +
1019 call->conn->securityHeaderSize;
1020 tcurlen = cp->wirevec[1].iov_len - call->conn->securityHeaderSize;
1021 }
1022
1023 if (tnFree < nbytes) {
1024 /* try to extend the current packet */
1025 int len, mud;
1026 len = cp->length;
1027 mud = rx_MaxUserDataSize(call);
1028 if (mud > len) {
1029 int want;
1030 want = MIN(nbytes - tnFree, mud - len);
1031 rxi_AllocDataBuf(cp, want, RX_PACKET_CLASS_SEND_CBUF);
1032 if (cp->length > (unsigned)mud)
1033 cp->length = mud;
1034 tnFree += (cp->length - len);
1035 if (cp == call->currentPacket) {
1036 call->nFree += (cp->length - len);
1037 }
1038 }
1039 }
1040
1041 /* fill in the next entry in the iovec */
1042 t = MIN(tcurlen, nbytes);
1043 t = MIN(tnFree, t);
1044 iov[nextio].iov_base = tcurpos;
1045 iov[nextio].iov_len = t;
1046 nbytes -= t;
1047 tcurpos += t;
1048 tcurlen -= t;
1049 tnFree -= t;
1050 nextio++;
1051
1052 if (!tcurlen) {
1053 /* need to get another struct iov */
1054 if (++tcurvec >= cp->niovecs) {
1055 /* current packet is full, extend it or move on to next packet */
1056 tnFree = 0;
1057 } else {
1058 tcurpos = (char *)cp->wirevec[tcurvec].iov_base;
1059 tcurlen = cp->wirevec[tcurvec].iov_len;
1060 }
1061 }
1062 } while (nbytes && nextio < maxio);
1063 *nio = nextio;
1064 return requestCount - nbytes;
1065 }
1066
1067 int
1068 rx_WritevAlloc(struct rx_call *call, struct iovec *iov, int *nio, int maxio,
1069 int nbytes)
1070 {
1071 int bytes;
1072 SPLVAR;
1073
1074 NETPRI;
1075 bytes = rxi_WritevAlloc(call, iov, nio, maxio, nbytes);
1076 USERPRI;
1077 return bytes;
1078 }
1079
1080 /* rxi_WritevProc -- internal version.
1081 *
1082 * Send buffers allocated in rxi_WritevAlloc.
1083 *
1084 * LOCKS USED -- called at netpri.
1085 */
1086 int
1087 rxi_WritevProc(struct rx_call *call, struct iovec *iov, int nio, int nbytes)
1088 {
1089 struct rx_packet *cp = NULL;
1090 #ifdef RX_TRACK_PACKETS
1091 struct rx_packet *p, *np;
1092 #endif
1093 int nextio;
1094 int requestCount;
1095 struct rx_queue tmpq;
1096 #ifdef RXDEBUG_PACKET
1097 u_short tmpqc;
1098 #endif
1099
1100 requestCount = nbytes;
1101 nextio = 0;
1102
1103 MUTEX_ENTER(&call->lock);
1104 if (call->error) {
1105 call->mode = RX_MODE_ERROR;
1106 } else if (call->mode != RX_MODE_SENDING) {
1107 call->error = RX_PROTOCOL_ERROR;
1108 }
1109 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
1110 rxi_WaitforTQBusy(call);
1111 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
1112 cp = call->currentPacket;
1113
1114 if (call->error) {
1115 call->mode = RX_MODE_ERROR;
1116 MUTEX_EXIT(&call->lock);
1117 if (cp) {
1118 #ifdef RX_TRACK_PACKETS
1119 cp->flags &= ~RX_PKTFLAG_CP;
1120 cp->flags |= RX_PKTFLAG_IOVQ;
1121 #endif
1122 queue_Prepend(&call->iovq, cp);
1123 #ifdef RXDEBUG_PACKET
1124 call->iovqc++;
1125 #endif /* RXDEBUG_PACKET */
1126 call->currentPacket = (struct rx_packet *)0;
1127 }
1128 #ifdef RXDEBUG_PACKET
1129 call->iovqc -=
1130 #endif /* RXDEBUG_PACKET */
1131 rxi_FreePackets(0, &call->iovq);
1132 return 0;
1133 }
1134
1135 /* Loop through the I/O vector adjusting packet pointers.
1136 * Place full packets back onto the iovq once they are ready
1137 * to send. Set RX_PROTOCOL_ERROR if any problems are found in
1138 * the iovec. We put the loop condition at the end to ensure that
1139 * a zero length write will push a short packet. */
1140 nextio = 0;
1141 queue_Init(&tmpq);
1142 #ifdef RXDEBUG_PACKET
1143 tmpqc = 0;
1144 #endif /* RXDEBUG_PACKET */
1145 do {
1146 if (call->nFree == 0 && cp) {
1147 clock_NewTime(); /* Bogus: need new time package */
1148 /* The 0, below, specifies that it is not the last packet:
1149 * there will be others. PrepareSendPacket may
1150 * alter the packet length by up to
1151 * conn->securityMaxTrailerSize */
1152 hadd32(call->bytesSent, cp->length);
1153 rxi_PrepareSendPacket(call, cp, 0);
1154 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
1155 /* PrepareSendPacket drops the call lock */
1156 rxi_WaitforTQBusy(call);
1157 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
1158 queue_Append(&tmpq, cp);
1159 #ifdef RXDEBUG_PACKET
1160 tmpqc++;
1161 #endif /* RXDEBUG_PACKET */
1162 cp = call->currentPacket = (struct rx_packet *)0;
1163
1164 /* The head of the iovq is now the current packet */
1165 if (nbytes) {
1166 if (queue_IsEmpty(&call->iovq)) {
1167 MUTEX_EXIT(&call->lock);
1168 call->error = RX_PROTOCOL_ERROR;
1169 #ifdef RXDEBUG_PACKET
1170 tmpqc -=
1171 #endif /* RXDEBUG_PACKET */
1172 rxi_FreePackets(0, &tmpq);
1173 return 0;
1174 }
1175 cp = queue_First(&call->iovq, rx_packet);
1176 queue_Remove(cp);
1177 #ifdef RX_TRACK_PACKETS
1178 cp->flags &= ~RX_PKTFLAG_IOVQ;
1179 #endif
1180 #ifdef RXDEBUG_PACKET
1181 call->iovqc--;
1182 #endif /* RXDEBUG_PACKET */
1183 #ifdef RX_TRACK_PACKETS
1184 cp->flags |= RX_PKTFLAG_CP;
1185 #endif
1186 call->currentPacket = cp;
1187 call->nFree = cp->length;
1188 call->curvec = 1;
1189 call->curpos =
1190 (char *)cp->wirevec[1].iov_base +
1191 call->conn->securityHeaderSize;
1192 call->curlen =
1193 cp->wirevec[1].iov_len - call->conn->securityHeaderSize;
1194 }
1195 }
1196
1197 if (nbytes) {
1198 /* The next iovec should point to the current position */
1199 if (iov[nextio].iov_base != call->curpos
1200 || iov[nextio].iov_len > (int)call->curlen) {
1201 call->error = RX_PROTOCOL_ERROR;
1202 MUTEX_EXIT(&call->lock);
1203 if (cp) {
1204 #ifdef RX_TRACK_PACKETS
1205 cp->flags &= ~RX_PKTFLAG_CP;
1206 #endif
1207 queue_Prepend(&tmpq, cp);
1208 #ifdef RXDEBUG_PACKET
1209 tmpqc++;
1210 #endif /* RXDEBUG_PACKET */
1211 cp = call->currentPacket = (struct rx_packet *)0;
1212 }
1213 #ifdef RXDEBUG_PACKET
1214 tmpqc -=
1215 #endif /* RXDEBUG_PACKET */
1216 rxi_FreePackets(0, &tmpq);
1217 return 0;
1218 }
1219 nbytes -= iov[nextio].iov_len;
1220 call->curpos += iov[nextio].iov_len;
1221 call->curlen -= iov[nextio].iov_len;
1222 call->nFree -= iov[nextio].iov_len;
1223 nextio++;
1224 if (call->curlen == 0) {
1225 if (++call->curvec > cp->niovecs) {
1226 call->nFree = 0;
1227 } else {
1228 call->curpos = (char *)cp->wirevec[call->curvec].iov_base;
1229 call->curlen = cp->wirevec[call->curvec].iov_len;
1230 }
1231 }
1232 }
1233 } while (nbytes && nextio < nio);
1234
1235 /* Move the packets from the temporary queue onto the transmit queue.
1236 * We may end up with more than call->twind packets on the queue. */
1237
1238 #ifdef RX_TRACK_PACKETS
1239 for (queue_Scan(&tmpq, p, np, rx_packet))
1240 {
1241 p->flags |= RX_PKTFLAG_TQ;
1242 }
1243 #endif
1244
1245 if (call->error)
1246 call->mode = RX_MODE_ERROR;
1247
1248 queue_SpliceAppend(&call->tq, &tmpq);
1249
1250 /* If the call is in recovery, let it exhaust its current retransmit
1251 * queue before forcing it to send new packets
1252 */
1253 if (!(call->flags & RX_CALL_FAST_RECOVER)) {
1254 rxi_Start(call, 0);
1255 }
1256
1257 /* Wait for the length of the transmit queue to fall below call->twind */
1258 while (!call->error && call->tnext + 1 > call->tfirst + (2 * call->twind)) {
1259 clock_NewTime();
1260 call->startWait = clock_Sec();
1261 #ifdef RX_ENABLE_LOCKS
1262 CV_WAIT(&call->cv_twind, &call->lock);
1263 #else
1264 call->flags |= RX_CALL_WAIT_WINDOW_ALLOC;
1265 osi_rxSleep(&call->twind);
1266 #endif
1267 call->startWait = 0;
1268 }
1269
1270 /* cp is no longer valid since we may have given up the lock */
1271 cp = call->currentPacket;
1272
1273 if (call->error) {
1274 call->mode = RX_MODE_ERROR;
1275 call->currentPacket = NULL;
1276 MUTEX_EXIT(&call->lock);
1277 if (cp) {
1278 #ifdef RX_TRACK_PACKETS
1279 cp->flags &= ~RX_PKTFLAG_CP;
1280 #endif
1281 rxi_FreePacket(cp);
1282 }
1283 return 0;
1284 }
1285 MUTEX_EXIT(&call->lock);
1286
1287 return requestCount - nbytes;
1288 }
1289
1290 int
1291 rx_WritevProc(struct rx_call *call, struct iovec *iov, int nio, int nbytes)
1292 {
1293 int bytes;
1294 SPLVAR;
1295
1296 NETPRI;
1297 bytes = rxi_WritevProc(call, iov, nio, nbytes);
1298 USERPRI;
1299 return bytes;
1300 }
1301
1302 /* Flush any buffered data to the stream, switch to read mode
1303 * (clients) or to EOF mode (servers)
1304 *
1305 * LOCKS HELD: called at netpri.
1306 */
1307 void
1308 rxi_FlushWrite(struct rx_call *call)
1309 {
1310 struct rx_packet *cp = NULL;
1311
1312 /* Free any packets from the last call to ReadvProc/WritevProc */
1313 if (queue_IsNotEmpty(&call->iovq)) {
1314 #ifdef RXDEBUG_PACKET
1315 call->iovqc -=
1316 #endif /* RXDEBUG_PACKET */
1317 rxi_FreePackets(0, &call->iovq);
1318 }
1319
1320 if (call->mode == RX_MODE_SENDING) {
1321
1322 call->mode =
1323 (call->conn->type ==
1324 RX_CLIENT_CONNECTION ? RX_MODE_RECEIVING : RX_MODE_EOF);
1325
1326 #ifdef RX_KERNEL_TRACE
1327 {
1328 int glockOwner = ISAFS_GLOCK();
1329 if (!glockOwner)
1330 AFS_GLOCK();
1331 afs_Trace3(afs_iclSetp, CM_TRACE_WASHERE, ICL_TYPE_STRING,
1332 __FILE__, ICL_TYPE_INT32, __LINE__, ICL_TYPE_POINTER,
1333 call);
1334 if (!glockOwner)
1335 AFS_GUNLOCK();
1336 }
1337 #endif
1338
1339 MUTEX_ENTER(&call->lock);
1340 if (call->error)
1341 call->mode = RX_MODE_ERROR;
1342
1343 cp = call->currentPacket;
1344
1345 if (cp) {
1346 /* cp->length is only supposed to be the user's data */
1347 /* cp->length was already set to (then-current)
1348 * MaxUserDataSize or less. */
1349 #ifdef RX_TRACK_PACKETS
1350 cp->flags &= ~RX_PKTFLAG_CP;
1351 #endif
1352 cp->length -= call->nFree;
1353 call->currentPacket = (struct rx_packet *)0;
1354 call->nFree = 0;
1355 } else {
1356 cp = rxi_AllocSendPacket(call, 0);
1357 if (!cp) {
1358 /* Mode can no longer be MODE_SENDING */
1359 return;
1360 }
1361 cp->length = 0;
1362 cp->niovecs = 2; /* header + space for rxkad stuff */
1363 call->nFree = 0;
1364 }
1365
1366 /* The 1 specifies that this is the last packet */
1367 hadd32(call->bytesSent, cp->length);
1368 rxi_PrepareSendPacket(call, cp, 1);
1369 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
1370 /* PrepareSendPacket drops the call lock */
1371 rxi_WaitforTQBusy(call);
1372 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
1373 #ifdef RX_TRACK_PACKETS
1374 cp->flags |= RX_PKTFLAG_TQ;
1375 #endif
1376 queue_Append(&call->tq, cp);
1377 #ifdef RXDEBUG_PACKET
1378 call->tqc++;
1379 #endif /* RXDEBUG_PACKET */
1380
1381 /* If the call is in recovery, let it exhaust its current retransmit
1382 * queue before forcing it to send new packets
1383 */
1384 if (!(call->flags & RX_CALL_FAST_RECOVER)) {
1385 rxi_Start(call, 0);
1386 }
1387 MUTEX_EXIT(&call->lock);
1388 }
1389 }
1390
1391 /* Flush any buffered data to the stream, switch to read mode
1392 * (clients) or to EOF mode (servers) */
1393 void
1394 rx_FlushWrite(struct rx_call *call)
1395 {
1396 SPLVAR;
1397 NETPRI;
1398 rxi_FlushWrite(call);
1399 USERPRI;
1400 }