File: | afs/afs_server.c |
Location: | line 786, column 5 |
Description: | Value stored to 'rxconns' is never read |
1 | /* |
2 | * Copyright 2000, International Business Machines Corporation and others. |
3 | * All Rights Reserved. |
4 | * |
5 | * This software has been released under the terms of the IBM Public |
6 | * License. For details, see the LICENSE file in the top-level source |
7 | * directory or online at http://www.openafs.org/dl/license10.html |
8 | */ |
9 | |
10 | /* |
11 | * Implements: |
12 | * afs_MarkServerUpOrDown |
13 | * afs_ServerDown |
14 | * afs_CountServers |
15 | * afs_CheckServers |
16 | * afs_FindServer |
17 | * afs_random |
18 | * afs_randomMod127 |
19 | * afs_SortServers |
20 | * afsi_SetServerIPRank |
21 | * afs_GetServer |
22 | * afs_ActivateServer |
23 | * |
24 | * |
25 | * Local: |
26 | * HaveCallBacksFrom |
27 | * CheckVLServer |
28 | * afs_SortOneServer |
29 | * afs_SetServerPrefs |
30 | * |
31 | */ |
32 | #include <afsconfig.h> |
33 | #include "afs/param.h" |
34 | |
35 | |
36 | #include "afs/stds.h" |
37 | #include "afs/sysincludes.h" /* Standard vendor system headers */ |
38 | |
39 | #if !defined(UKERNEL1) |
40 | #if !defined(AFS_LINUX20_ENV) |
41 | #include <net/if.h> |
42 | #endif |
43 | #include <netinet/in.h> |
44 | |
45 | #ifdef AFS_SGI62_ENV |
46 | #include "h/hashing.h" |
47 | #endif |
48 | #if !defined(AFS_HPUX110_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_DARWIN_ENV) |
49 | #include <netinet/in_var.h> |
50 | #endif /* AFS_HPUX110_ENV */ |
51 | #ifdef AFS_DARWIN_ENV |
52 | #include <net/if_var.h> |
53 | #endif |
54 | #endif /* !defined(UKERNEL) */ |
55 | |
56 | #include "afsincludes.h" /* Afs-based standard headers */ |
57 | #include "afs/afs_stats.h" /* afs statistics */ |
58 | #include "rx/rx_multi.h" |
59 | |
60 | #if defined(AFS_SUN5_ENV) |
61 | #include <inet/led.h> |
62 | #include <inet/common.h> |
63 | #include <netinet/ip6.h> |
64 | #define ipif_local_addr ipif_lcl_addr |
65 | #ifndef V4_PART_OF_V6 |
66 | # define V4_PART_OF_V6(v6) v6.s6_addr32[3] |
67 | #endif |
68 | #include <inet/ip.h> |
69 | #endif |
70 | |
71 | /* Exported variables */ |
72 | afs_rwlock_t afs_xserver; /* allocation lock for servers */ |
73 | struct server *afs_setTimeHost = 0; /* last host we used for time */ |
74 | struct server *afs_servers[NSERVERS16]; /* Hashed by server`s uuid & 1st ip */ |
75 | afs_rwlock_t afs_xsrvAddr; /* allocation lock for srvAddrs */ |
76 | struct srvAddr *afs_srvAddrs[NSERVERS16]; /* Hashed by server's ip */ |
77 | |
78 | |
79 | /* debugging aids - number of alloc'd server and srvAddr structs. */ |
80 | int afs_reuseServers = 0; |
81 | int afs_reuseSrvAddrs = 0; |
82 | int afs_totalServers = 0; |
83 | int afs_totalSrvAddrs = 0; |
84 | |
85 | |
86 | |
87 | static struct afs_stats_SrvUpDownInfo * |
88 | GetUpDownStats(struct server *srv) |
89 | { |
90 | struct afs_stats_SrvUpDownInfo *upDownP; |
91 | u_short fsport = AFS_FSPORT((unsigned short) (__builtin_constant_p(7000) ? (__uint16_t)( ((__uint16_t)(7000)) << 8 | ((__uint16_t)(7000)) >> 8) : __bswap16_var(7000))); |
92 | |
93 | if (srv->cell) |
94 | fsport = srv->cell->fsport; |
95 | |
96 | if (srv->addr->sa_portal == fsport) |
97 | upDownP = afs_stats_cmperf.fs_UpDown; |
98 | else |
99 | upDownP = afs_stats_cmperf.vl_UpDown; |
100 | |
101 | if (srv->cell && afs_IsPrimaryCell(srv->cell)) |
102 | return &upDownP[AFS_STATS_UPDOWN_IDX_SAME_CELL0]; |
103 | else |
104 | return &upDownP[AFS_STATS_UPDOWN_IDX_DIFF_CELL1]; |
105 | } |
106 | |
107 | |
108 | /*------------------------------------------------------------------------ |
109 | * afs_MarkServerUpOrDown |
110 | * |
111 | * Description: |
112 | * Mark the given server up or down, and track its uptime stats. |
113 | * |
114 | * Arguments: |
115 | * a_serverP : Ptr to server record to fiddle with. |
116 | * a_isDown : Is the server is to be marked down? |
117 | * |
118 | * Returns: |
119 | * Nothing. |
120 | * |
121 | * Environment: |
122 | * The CM server structures must be write-locked. |
123 | * |
124 | * Side Effects: |
125 | * As advertised. |
126 | *------------------------------------------------------------------------*/ |
127 | |
128 | void |
129 | afs_MarkServerUpOrDown(struct srvAddr *sa, int a_isDown) |
130 | { |
131 | struct server *a_serverP = sa->server; |
132 | struct srvAddr *sap; |
133 | osi_timeval_t currTime, *currTimeP; /*Current time */ |
134 | afs_int32 downTime; /*Computed downtime, in seconds */ |
135 | struct afs_stats_SrvUpDownInfo *upDownP; /*Ptr to up/down info record */ |
136 | |
137 | /* |
138 | * If the server record is marked the same as the new status we've |
139 | * been fed, then there isn't much to be done. |
140 | */ |
141 | if ((a_isDown && (sa->sa_flags & SRVADDR_ISDOWN0x20)) |
142 | || (!a_isDown && !(sa->sa_flags & SRVADDR_ISDOWN0x20))) |
143 | return; |
144 | |
145 | if (a_isDown) { |
146 | sa->sa_flags |= SRVADDR_ISDOWN0x20; |
147 | for (sap = a_serverP->addr; sap; sap = sap->next_sa) { |
148 | if (!(sap->sa_flags & SRVADDR_ISDOWN0x20)) { |
149 | /* Not all ips are up so don't bother with the |
150 | * server's up/down stats */ |
151 | return; |
152 | } |
153 | } |
154 | /* |
155 | * All ips are down we treat the whole server down |
156 | */ |
157 | a_serverP->flags |= SRVR_ISDOWN0x20; |
158 | /* |
159 | * If this was our time server, search for another time server |
160 | */ |
161 | if (a_serverP == afs_setTimeHost) |
162 | afs_setTimeHost = 0; |
163 | } else { |
164 | sa->sa_flags &= ~SRVADDR_ISDOWN0x20; |
165 | /* If any ips are up, the server is also marked up */ |
166 | a_serverP->flags &= ~SRVR_ISDOWN0x20; |
167 | for (sap = a_serverP->addr; sap; sap = sap->next_sa) { |
168 | if (sap->sa_flags & SRVADDR_ISDOWN0x20) { |
169 | /* Not all ips are up so don't bother with the |
170 | * server's up/down stats */ |
171 | return; |
172 | } |
173 | } |
174 | } |
175 | #ifndef AFS_NOSTATS |
176 | /* |
177 | * Compute the current time and which overall stats record is to be |
178 | * updated; we'll need them one way or another. |
179 | */ |
180 | currTimeP = &currTime; |
181 | osi_GetuTime(currTimeP)osi_GetTime(currTimeP); |
182 | |
183 | upDownP = GetUpDownStats(a_serverP); |
184 | |
185 | if (a_isDown) { |
186 | /* |
187 | * Server going up -> down; remember the beginning of this |
188 | * downtime incident. |
189 | */ |
190 | a_serverP->lastDowntimeStart = currTime.tv_sec; |
191 | |
192 | (upDownP->numDownRecords)++; |
193 | (upDownP->numUpRecords)--; |
194 | } /*Server being marked down */ |
195 | else { |
196 | /* |
197 | * Server going down -> up; remember everything about this |
198 | * newly-completed downtime incident. |
199 | */ |
200 | downTime = currTime.tv_sec - a_serverP->lastDowntimeStart; |
201 | (a_serverP->numDowntimeIncidents)++; |
202 | a_serverP->sumOfDowntimes += downTime; |
203 | |
204 | (upDownP->numUpRecords)++; |
205 | (upDownP->numDownRecords)--; |
206 | (upDownP->numDowntimeIncidents)++; |
207 | if (a_serverP->numDowntimeIncidents == 1) |
208 | (upDownP->numRecordsNeverDown)--; |
209 | upDownP->sumOfDowntimes += downTime; |
210 | if ((upDownP->shortestDowntime == 0) |
211 | || (downTime < upDownP->shortestDowntime)) |
212 | upDownP->shortestDowntime = downTime; |
213 | if ((upDownP->longestDowntime == 0) |
214 | || (downTime > upDownP->longestDowntime)) |
215 | upDownP->longestDowntime = downTime; |
216 | |
217 | |
218 | if (downTime <= AFS_STATS_MAX_DOWNTIME_DURATION_BUCKET0600) |
219 | (upDownP->downDurations[0])++; |
220 | else if (downTime <= AFS_STATS_MAX_DOWNTIME_DURATION_BUCKET11800) |
221 | (upDownP->downDurations[1])++; |
222 | else if (downTime <= AFS_STATS_MAX_DOWNTIME_DURATION_BUCKET23600) |
223 | (upDownP->downDurations[2])++; |
224 | else if (downTime <= AFS_STATS_MAX_DOWNTIME_DURATION_BUCKET37200) |
225 | (upDownP->downDurations[3])++; |
226 | else if (downTime <= AFS_STATS_MAX_DOWNTIME_DURATION_BUCKET414400) |
227 | (upDownP->downDurations[4])++; |
228 | else if (downTime <= AFS_STATS_MAX_DOWNTIME_DURATION_BUCKET528800) |
229 | (upDownP->downDurations[5])++; |
230 | else |
231 | (upDownP->downDurations[6])++; |
232 | |
233 | } /*Server being marked up */ |
234 | #endif |
235 | } /*MarkServerUpOrDown */ |
236 | |
237 | |
238 | afs_int32 |
239 | afs_ServerDown(struct srvAddr *sa) |
240 | { |
241 | struct server *aserver = sa->server; |
242 | |
243 | AFS_STATCNT(ServerDown)((afs_cmstats.callInfo.C_ServerDown)++); |
244 | if (aserver->flags & SRVR_ISDOWN0x20 || sa->sa_flags & SRVADDR_ISDOWN0x20) |
245 | return 0; |
246 | afs_MarkServerUpOrDown(sa, SRVR_ISDOWN0x20); |
247 | if (sa->sa_portal == aserver->cell->vlport) |
248 | print_internet_address |
249 | ("afs: Lost contact with volume location server ", sa, "", 1); |
250 | else |
251 | print_internet_address("afs: Lost contact with file server ", sa, "", |
252 | 1); |
253 | return 1; |
254 | } /*ServerDown */ |
255 | |
256 | |
257 | /* return true if we have any callback promises from this server */ |
258 | int |
259 | afs_HaveCallBacksFrom(struct server *aserver) |
260 | { |
261 | afs_int32 now; |
262 | int i; |
263 | struct vcache *tvc; |
264 | |
265 | AFS_STATCNT(HaveCallBacksFrom)((afs_cmstats.callInfo.C_HaveCallBacksFrom)++); |
266 | now = osi_Time()(time(((void *)0))); /* for checking for expired callbacks */ |
267 | for (i = 0; i < VCSIZE1024; i++) { /* for all guys in the hash table */ |
268 | for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) { |
269 | /* |
270 | * Check to see if this entry has an unexpired callback promise |
271 | * from the required host |
272 | */ |
273 | if (aserver == tvc->callback && tvc->cbExpires >= now |
274 | && ((tvc->f.states & CRO0x00000004) == 0)) |
275 | return 1; |
276 | } |
277 | } |
278 | return 0; |
279 | |
280 | } /*HaveCallBacksFrom */ |
281 | |
282 | |
283 | static void |
284 | CheckVLServer(struct srvAddr *sa, struct vrequest *areq) |
285 | { |
286 | struct server *aserver = sa->server; |
287 | struct afs_conn *tc; |
288 | afs_int32 code; |
289 | struct rx_connection *rxconn; |
290 | |
291 | AFS_STATCNT(CheckVLServer)((afs_cmstats.callInfo.C_CheckVLServer)++); |
292 | /* Ping dead servers to see if they're back */ |
293 | if (!((aserver->flags & SRVR_ISDOWN0x20) || (sa->sa_flags & SRVADDR_ISDOWN0x20)) |
294 | || (aserver->flags & SRVR_ISGONE0x80)) |
295 | return; |
296 | if (!aserver->cell) |
297 | return; /* can't do much */ |
298 | |
299 | tc = afs_ConnByHost(aserver, aserver->cell->vlport, |
300 | aserver->cell->cellNum, areq, 1, SHARED_LOCK4, &rxconn); |
301 | if (!tc) |
302 | return; |
303 | rx_SetConnDeadTime(rxconn, 3); |
304 | |
305 | RX_AFS_GUNLOCK()do { do { if (!(pthread_self() == afs_global_owner)) { osi_Panic ("afs global lock not held"); } } while(0); memset(&afs_global_owner , 0, sizeof(pthread_t)); do{if (!(pthread_mutex_unlock(&afs_global_lock ) == 0)) AssertionFailed("/home/wollman/openafs/src/afs/afs_server.c" , 305);}while(0); } while(0); |
306 | code = VL_ProbeServer(rxconn); |
307 | RX_AFS_GLOCK()do { do{if (!(pthread_mutex_lock(&afs_global_lock) == 0)) AssertionFailed("/home/wollman/openafs/src/afs/afs_server.c" , 307);}while(0); afs_global_owner = pthread_self(); } while( 0); |
308 | rx_SetConnDeadTime(rxconn, afs_rx_deadtime); |
309 | afs_PutConn(tc, rxconn, SHARED_LOCK4); |
310 | /* |
311 | * If probe worked, or probe call not yet defined (for compatibility |
312 | * with old vlsevers), then we treat this server as running again |
313 | */ |
314 | if (code == 0 || (code <= -450 && code >= -470)) { |
315 | if (tc->parent->srvr == sa) { |
316 | afs_MarkServerUpOrDown(sa, 0); |
317 | print_internet_address("afs: volume location server ", sa, |
318 | " is back up", 2); |
319 | } |
320 | } |
321 | |
322 | } /*CheckVLServer */ |
323 | |
324 | |
325 | #ifndef AFS_MINCHANGE2 /* So that some can increase it in param.h */ |
326 | #define AFS_MINCHANGE2 2 /* min change we'll bother with */ |
327 | #endif |
328 | #ifndef AFS_MAXCHANGEBACK10 |
329 | #define AFS_MAXCHANGEBACK10 10 /* max seconds we'll set a clock back at once */ |
330 | #endif |
331 | |
332 | |
333 | /*------------------------------------------------------------------------ |
334 | * EXPORTED afs_CountServers |
335 | * |
336 | * Description: |
337 | * Originally meant to count the number of servers and determining |
338 | * up/down info, this routine will now simply sum up all of the |
339 | * server record ages. All other up/down information is kept on the |
340 | * fly. |
341 | * |
342 | * Arguments: |
343 | * None. |
344 | * |
345 | * Returns: |
346 | * Nothing. |
347 | * |
348 | * Environment: |
349 | * This routine locks afs_xserver for read for the duration. |
350 | * |
351 | * Side Effects: |
352 | * Set CM perf stats field sumOfRecordAges for all server record |
353 | * entries. |
354 | *------------------------------------------------------------------------*/ |
355 | |
356 | void |
357 | afs_CountServers(void) |
358 | { |
359 | int currIdx; /*Curr idx into srv table */ |
360 | struct server *currSrvP; /*Ptr to curr server record */ |
361 | afs_int32 currChainLen; /*Length of curr hash chain */ |
362 | osi_timeval_t currTime; /*Current time */ |
363 | osi_timeval_t *currTimeP; /*Ptr to above */ |
364 | afs_int32 srvRecordAge; /*Age of server record, in secs */ |
365 | struct afs_stats_SrvUpDownInfo *upDownP; /*Ptr to current up/down |
366 | * info being manipulated */ |
367 | |
368 | /* |
369 | * Write-lock the server table so we don't get any interference. |
370 | */ |
371 | ObtainReadLock(&afs_xserver)do { ; if (!((&afs_xserver)->excl_locked & 2)) ((& afs_xserver)->readers_reading)++; else Afs_Lock_Obtain(& afs_xserver, 1); (&afs_xserver)->pid_last_reader = (get_user_struct ()->u_procp->p_pid ); } while (0); |
372 | |
373 | /* |
374 | * Iterate over each hash index in the server table, walking down each |
375 | * chain and tallying what we haven't computed from the records there on |
376 | * the fly. First, though, initialize the tallies that will change. |
377 | */ |
378 | afs_stats_cmperf.srvMaxChainLength = 0; |
379 | |
380 | afs_stats_cmperf.fs_UpDown[0].sumOfRecordAges = 0; |
381 | afs_stats_cmperf.fs_UpDown[0].ageOfYoungestRecord = 0; |
382 | afs_stats_cmperf.fs_UpDown[0].ageOfOldestRecord = 0; |
383 | memset(afs_stats_cmperf.fs_UpDown[0].downIncidents, 0, |
384 | AFS_STATS_NUM_DOWNTIME_INCIDENTS_BUCKETS6 * sizeof(afs_int32)); |
385 | |
386 | afs_stats_cmperf.fs_UpDown[1].sumOfRecordAges = 0; |
387 | afs_stats_cmperf.fs_UpDown[1].ageOfYoungestRecord = 0; |
388 | afs_stats_cmperf.fs_UpDown[1].ageOfOldestRecord = 0; |
389 | memset(afs_stats_cmperf.fs_UpDown[1].downIncidents, 0, |
390 | AFS_STATS_NUM_DOWNTIME_INCIDENTS_BUCKETS6 * sizeof(afs_int32)); |
391 | |
392 | afs_stats_cmperf.vl_UpDown[0].sumOfRecordAges = 0; |
393 | afs_stats_cmperf.vl_UpDown[0].ageOfYoungestRecord = 0; |
394 | afs_stats_cmperf.vl_UpDown[0].ageOfOldestRecord = 0; |
395 | memset(afs_stats_cmperf.vl_UpDown[0].downIncidents, 0, |
396 | AFS_STATS_NUM_DOWNTIME_INCIDENTS_BUCKETS6 * sizeof(afs_int32)); |
397 | |
398 | afs_stats_cmperf.vl_UpDown[1].sumOfRecordAges = 0; |
399 | afs_stats_cmperf.vl_UpDown[1].ageOfYoungestRecord = 0; |
400 | afs_stats_cmperf.vl_UpDown[1].ageOfOldestRecord = 0; |
401 | memset(afs_stats_cmperf.vl_UpDown[1].downIncidents, 0, |
402 | AFS_STATS_NUM_DOWNTIME_INCIDENTS_BUCKETS6 * sizeof(afs_int32)); |
403 | |
404 | /* |
405 | * Compute the current time, used to figure out server record ages. |
406 | */ |
407 | currTimeP = &currTime; |
408 | osi_GetuTime(currTimeP)osi_GetTime(currTimeP); |
409 | |
410 | /* |
411 | * Sweep the server hash table, tallying all we need to know. |
412 | */ |
413 | for (currIdx = 0; currIdx < NSERVERS16; currIdx++) { |
414 | currChainLen = 0; |
415 | for (currSrvP = afs_servers[currIdx]; currSrvP; |
416 | currSrvP = currSrvP->next) { |
417 | /* |
418 | * Bump the current chain length. |
419 | */ |
420 | currChainLen++; |
421 | |
422 | /* |
423 | * Any further tallying for this record will only be done if it has |
424 | * been activated. |
425 | */ |
426 | if ((currSrvP->flags & AFS_SERVER_FLAG_ACTIVATED0x01) |
427 | && currSrvP->addr && currSrvP->cell) { |
428 | |
429 | /* |
430 | * Compute the current server record's age, then remember it |
431 | * in the appropriate places. |
432 | */ |
433 | srvRecordAge = currTime.tv_sec - currSrvP->activationTime; |
434 | upDownP = GetUpDownStats(currSrvP); |
435 | upDownP->sumOfRecordAges += srvRecordAge; |
436 | if ((upDownP->ageOfYoungestRecord == 0) |
437 | || (srvRecordAge < upDownP->ageOfYoungestRecord)) |
438 | upDownP->ageOfYoungestRecord = srvRecordAge; |
439 | if ((upDownP->ageOfOldestRecord == 0) |
440 | || (srvRecordAge > upDownP->ageOfOldestRecord)) |
441 | upDownP->ageOfOldestRecord = srvRecordAge; |
442 | |
443 | if (currSrvP->numDowntimeIncidents <= |
444 | AFS_STATS_MAX_DOWNTIME_INCIDENTS_BUCKET00) |
445 | (upDownP->downIncidents[0])++; |
446 | else if (currSrvP->numDowntimeIncidents <= |
447 | AFS_STATS_MAX_DOWNTIME_INCIDENTS_BUCKET11) |
448 | (upDownP->downIncidents[1])++; |
449 | else if (currSrvP->numDowntimeIncidents <= |
450 | AFS_STATS_MAX_DOWNTIME_INCIDENTS_BUCKET25) |
451 | (upDownP->downIncidents[2])++; |
452 | else if (currSrvP->numDowntimeIncidents <= |
453 | AFS_STATS_MAX_DOWNTIME_INCIDENTS_BUCKET310) |
454 | (upDownP->downIncidents[3])++; |
455 | else if (currSrvP->numDowntimeIncidents <= |
456 | AFS_STATS_MAX_DOWNTIME_INCIDENTS_BUCKET450) |
457 | (upDownP->downIncidents[4])++; |
458 | else |
459 | (upDownP->downIncidents[5])++; |
460 | |
461 | |
462 | } /*Current server has been active */ |
463 | } /*Walk this chain */ |
464 | |
465 | /* |
466 | * Before advancing to the next chain, remember facts about this one. |
467 | */ |
468 | if (currChainLen > afs_stats_cmperf.srvMaxChainLength) { |
469 | /* |
470 | * We beat out the former champion (which was initially set to 0 |
471 | * here). Mark down the new winner, and also remember if it's an |
472 | * all-time winner. |
473 | */ |
474 | afs_stats_cmperf.srvMaxChainLength = currChainLen; |
475 | if (currChainLen > afs_stats_cmperf.srvMaxChainLengthHWM) |
476 | afs_stats_cmperf.srvMaxChainLengthHWM = currChainLen; |
477 | } /*Update chain length maximum */ |
478 | } /*For each hash chain */ |
479 | |
480 | /* |
481 | * We're done. Unlock the server table before returning to our caller. |
482 | */ |
483 | ReleaseReadLock(&afs_xserver)do { ; if (!(--((&afs_xserver)->readers_reading)) && (&afs_xserver)->wait_states) Afs_Lock_ReleaseW(&afs_xserver ) ; if ( (&afs_xserver)->pid_last_reader == (get_user_struct ()->u_procp->p_pid ) ) (&afs_xserver)->pid_last_reader =0; } while (0); |
484 | |
485 | } /*afs_CountServers */ |
486 | |
487 | |
488 | void |
489 | ForceAllNewConnections(void) |
490 | { |
491 | int srvAddrCount; |
492 | struct srvAddr **addrs; |
493 | struct srvAddr *sa; |
494 | afs_int32 i, j; |
495 | |
496 | ObtainReadLock(&afs_xserver)do { ; if (!((&afs_xserver)->excl_locked & 2)) ((& afs_xserver)->readers_reading)++; else Afs_Lock_Obtain(& afs_xserver, 1); (&afs_xserver)->pid_last_reader = (get_user_struct ()->u_procp->p_pid ); } while (0); /* Necessary? */ |
497 | ObtainReadLock(&afs_xsrvAddr)do { ; if (!((&afs_xsrvAddr)->excl_locked & 2)) (( &afs_xsrvAddr)->readers_reading)++; else Afs_Lock_Obtain (&afs_xsrvAddr, 1); (&afs_xsrvAddr)->pid_last_reader = (get_user_struct()->u_procp->p_pid ); } while (0); |
498 | |
499 | srvAddrCount = 0; |
500 | for (i = 0; i < NSERVERS16; i++) { |
501 | for (sa = afs_srvAddrs[i]; sa; sa = sa->next_bkt) { |
502 | srvAddrCount++; |
503 | } |
504 | } |
505 | |
506 | addrs = afs_osi_Alloc(srvAddrCount * sizeof(*addrs)); |
507 | osi_Assert(addrs != NULL)(void)((addrs != ((void *)0)) || (osi_AssertFailK( "addrs != NULL" , "/home/wollman/openafs/src/afs/afs_server.c", 507), 0)); |
508 | j = 0; |
509 | for (i = 0; i < NSERVERS16; i++) { |
510 | for (sa = afs_srvAddrs[i]; sa; sa = sa->next_bkt) { |
511 | if (j >= srvAddrCount) |
512 | break; |
513 | addrs[j++] = sa; |
514 | } |
515 | } |
516 | |
517 | ReleaseReadLock(&afs_xsrvAddr)do { ; if (!(--((&afs_xsrvAddr)->readers_reading)) && (&afs_xsrvAddr)->wait_states) Afs_Lock_ReleaseW(& afs_xsrvAddr) ; if ( (&afs_xsrvAddr)->pid_last_reader == (get_user_struct()->u_procp->p_pid ) ) (&afs_xsrvAddr )->pid_last_reader =0; } while (0); |
518 | ReleaseReadLock(&afs_xserver)do { ; if (!(--((&afs_xserver)->readers_reading)) && (&afs_xserver)->wait_states) Afs_Lock_ReleaseW(&afs_xserver ) ; if ( (&afs_xserver)->pid_last_reader == (get_user_struct ()->u_procp->p_pid ) ) (&afs_xserver)->pid_last_reader =0; } while (0); |
519 | for (i = 0; i < j; i++) { |
520 | sa = addrs[i]; |
521 | ForceNewConnections(sa); |
522 | } |
523 | } |
524 | |
525 | static void |
526 | CkSrv_MarkUpDown(struct afs_conn **conns, int nconns, afs_int32 *results) |
527 | { |
528 | struct srvAddr *sa; |
529 | struct afs_conn *tc; |
530 | afs_int32 i; |
531 | |
532 | for(i = 0; i < nconns; i++){ |
533 | tc = conns[i]; |
534 | sa = tc->parent->srvr; |
535 | |
536 | if (( results[i] >= 0 ) && (sa->sa_flags & SRVADDR_ISDOWN0x20) && |
537 | (tc->parent->srvr == sa)) { |
538 | /* server back up */ |
539 | print_internet_address("afs: file server ", sa, " is back up", 2); |
540 | |
541 | ObtainWriteLock(&afs_xserver, 244)do { ; if (!(&afs_xserver)->excl_locked && !(& afs_xserver)->readers_reading) (&afs_xserver) -> excl_locked = 2; else Afs_Lock_Obtain(&afs_xserver, 2); (&afs_xserver )->pid_writer = (get_user_struct()->u_procp->p_pid ) ; (&afs_xserver)->src_indicator = 244; } while (0); |
542 | ObtainWriteLock(&afs_xsrvAddr, 245)do { ; if (!(&afs_xsrvAddr)->excl_locked && !( &afs_xsrvAddr)->readers_reading) (&afs_xsrvAddr) -> excl_locked = 2; else Afs_Lock_Obtain(&afs_xsrvAddr, 2); (&afs_xsrvAddr)->pid_writer = (get_user_struct()-> u_procp->p_pid ); (&afs_xsrvAddr)->src_indicator = 245 ; } while (0); |
543 | afs_MarkServerUpOrDown(sa, 0); |
544 | ReleaseWriteLock(&afs_xsrvAddr)do { ; (&afs_xsrvAddr)->excl_locked &= ~2; if ((& afs_xsrvAddr)->wait_states) Afs_Lock_ReleaseR(&afs_xsrvAddr ); (&afs_xsrvAddr)->pid_writer=0; } while (0); |
545 | ReleaseWriteLock(&afs_xserver)do { ; (&afs_xserver)->excl_locked &= ~2; if ((& afs_xserver)->wait_states) Afs_Lock_ReleaseR(&afs_xserver ); (&afs_xserver)->pid_writer=0; } while (0); |
546 | |
547 | if (afs_waitForeverCount) { |
548 | afs_osi_Wakeup(&afs_waitForever); |
549 | } |
550 | } else { |
551 | if (results[i] < 0) { |
552 | /* server crashed */ |
553 | afs_ServerDown(sa); |
554 | ForceNewConnections(sa); /* multi homed clients */ |
555 | } |
556 | } |
557 | } |
558 | } |
559 | |
560 | void |
561 | CkSrv_SetTime(struct rx_connection **rxconns, int nconns, int nservers, |
562 | struct afs_conn **conns, struct srvAddr **addrs) |
563 | { |
564 | struct afs_conn *tc; |
565 | afs_int32 start, end = 0, delta; |
566 | osi_timeval_t tv; |
567 | struct srvAddr *sa; |
568 | afs_int32 *conntimer, *results, *deltas; |
569 | afs_int32 i = 0; |
570 | char tbuffer[CVBS12]; |
571 | |
572 | conntimer = afs_osi_Alloc(nservers * sizeof (afs_int32)); |
573 | osi_Assert(conntimer != NULL)(void)((conntimer != ((void *)0)) || (osi_AssertFailK( "conntimer != NULL" , "/home/wollman/openafs/src/afs/afs_server.c", 573), 0)); |
574 | results = afs_osi_Alloc(nservers * sizeof (afs_int32)); |
575 | osi_Assert(results != NULL)(void)((results != ((void *)0)) || (osi_AssertFailK( "results != NULL" , "/home/wollman/openafs/src/afs/afs_server.c", 575), 0)); |
576 | deltas = afs_osi_Alloc(nservers * sizeof (afs_int32)); |
577 | osi_Assert(deltas != NULL)(void)((deltas != ((void *)0)) || (osi_AssertFailK( "deltas != NULL" , "/home/wollman/openafs/src/afs/afs_server.c", 577), 0)); |
578 | |
579 | /* make sure we're starting from zero */ |
580 | memset(&deltas, 0, sizeof(deltas)); |
581 | |
582 | start = osi_Time()(time(((void *)0))); /* time the gettimeofday call */ |
583 | AFS_GUNLOCK()do { do { if (!(pthread_self() == afs_global_owner)) { osi_Panic ("afs global lock not held"); } } while(0); memset(&afs_global_owner , 0, sizeof(pthread_t)); do{if (!(pthread_mutex_unlock(&afs_global_lock ) == 0)) AssertionFailed("/home/wollman/openafs/src/afs/afs_server.c" , 583);}while(0); } while(0); |
584 | if ( afs_setTimeHost == NULL((void *)0) ) { |
585 | multi_Rx(rxconns,nconns)do { struct multi_handle *multi_h; int multi_i; int multi_i0; afs_int32 multi_error; struct rx_call *multi_call; multi_h = multi_Init(rxconns, nconns); for (multi_i0 = multi_i = 0; ; multi_i = multi_i0 ) |
586 | { |
587 | tv.tv_sec = tv.tv_usec = 0; |
588 | multi_RXAFS_GetTime(if (multi_h->nextReady == multi_h->firstNotReady && multi_i < multi_h->nConns) { multi_call = multi_h-> calls[multi_i]; if (multi_call) { StartRXAFS_GetTime(multi_call ); rx_FlushWrite(multi_call); } multi_i0++; continue; } if (( multi_i = multi_Select(multi_h)) < 0) break; multi_call = multi_h ->calls[multi_i]; multi_error = rx_EndCall(multi_call, EndRXAFS_GetTime (multi_call, (afs_uint32 *)&tv.tv_sec, (afs_uint32 *)& tv.tv_usec)); multi_h->calls[multi_i] = (struct rx_call *) 0 |
589 | (afs_uint32 *)&tv.tv_sec, (afs_uint32 *)&tv.tv_usec)if (multi_h->nextReady == multi_h->firstNotReady && multi_i < multi_h->nConns) { multi_call = multi_h-> calls[multi_i]; if (multi_call) { StartRXAFS_GetTime(multi_call ); rx_FlushWrite(multi_call); } multi_i0++; continue; } if (( multi_i = multi_Select(multi_h)) < 0) break; multi_call = multi_h ->calls[multi_i]; multi_error = rx_EndCall(multi_call, EndRXAFS_GetTime (multi_call, (afs_uint32 *)&tv.tv_sec, (afs_uint32 *)& tv.tv_usec)); multi_h->calls[multi_i] = (struct rx_call *) 0; |
590 | tc = conns[multi_i]; |
591 | sa = tc->parent->srvr; |
592 | if (conntimer[multi_i] == 1) |
593 | rx_SetConnDeadTime(rxconns[multi_i], afs_rx_deadtime); |
594 | end = osi_Time()(time(((void *)0))); |
595 | results[multi_i]=multi_error; |
596 | if ((start == end) && !multi_error) |
597 | deltas[multi_i] = end - tv.tv_sec; |
598 | } multi_Endmulti_Finalize(multi_h); } while (0); |
599 | } else { /* find and query setTimeHost only */ |
600 | for ( i = 0 ; i < nservers ; i++ ) { |
601 | if ( conns[i] == NULL((void *)0) || conns[i]->parent->srvr == NULL((void *)0) ) |
602 | continue; |
603 | if ( conns[i]->parent->srvr->server == afs_setTimeHost ) { |
604 | tv.tv_sec = tv.tv_usec = 0; |
605 | results[i] = RXAFS_GetTime(rxconns[i], |
606 | (afs_uint32 *)&tv.tv_sec, |
607 | (afs_uint32 *)&tv.tv_usec); |
608 | end = osi_Time()(time(((void *)0))); |
609 | if ((start == end) && !results[i]) |
610 | deltas[i] = end - tv.tv_sec; |
611 | break; |
612 | } |
613 | } |
614 | } |
615 | AFS_GLOCK()do { do{if (!(pthread_mutex_lock(&afs_global_lock) == 0)) AssertionFailed("/home/wollman/openafs/src/afs/afs_server.c" , 615);}while(0); afs_global_owner = pthread_self(); } while( 0); |
616 | |
617 | if ( afs_setTimeHost == NULL((void *)0) ) |
618 | CkSrv_MarkUpDown(conns, nconns, results); |
619 | else /* We lack info for other than this host */ |
620 | CkSrv_MarkUpDown(&conns[i], 1, &results[i]); |
621 | |
622 | /* |
623 | * If we're supposed to set the time, and the call worked |
624 | * quickly (same second response) and this is the host we |
625 | * use for the time and the time is really different, then |
626 | * really set the time |
627 | */ |
628 | if (afs_setTime != 0) { |
629 | for (i=0; i<nconns; i++) { |
630 | delta = deltas[i]; |
631 | tc = conns[i]; |
632 | sa = tc->parent->srvr; |
633 | |
634 | if ((tc->parent->srvr->server == afs_setTimeHost || |
635 | /* Sync only to a server in the local cell */ |
636 | (afs_setTimeHost == (struct server *)0 && |
637 | afs_IsPrimaryCell(sa->server->cell)))) { |
638 | /* set the time */ |
639 | char msgbuf[90]; /* strlen("afs: setting clock...") + slop */ |
640 | delta = end - tv.tv_sec; /* how many secs fast we are */ |
641 | |
642 | afs_setTimeHost = tc->parent->srvr->server; |
643 | /* see if clock has changed enough to make it worthwhile */ |
644 | if (delta >= AFS_MINCHANGE2 || delta <= -AFS_MINCHANGE2) { |
645 | end = osi_Time()(time(((void *)0))); |
646 | if (delta > AFS_MAXCHANGEBACK10) { |
647 | /* setting clock too far back, just do it a little */ |
648 | tv.tv_sec = end - AFS_MAXCHANGEBACK10; |
649 | } else { |
650 | tv.tv_sec = end - delta; |
651 | } |
652 | afs_osi_SetTime(&tv); |
653 | if (delta > 0) { |
654 | strcpy(msgbuf, "afs: setting clock back "); |
655 | if (delta > AFS_MAXCHANGEBACK10) { |
656 | afs_strcat(msgbuf, |
657 | afs_cv2string(&tbuffer[CVBS12], |
658 | AFS_MAXCHANGEBACK10)); |
659 | afs_strcat(msgbuf, " seconds (of "); |
660 | afs_strcat(msgbuf, |
661 | afs_cv2string(&tbuffer[CVBS12], |
662 | delta - |
663 | AFS_MAXCHANGEBACK10)); |
664 | afs_strcat(msgbuf, ", via "); |
665 | print_internet_address(msgbuf, sa, |
666 | "); clock is still fast.", |
667 | 0); |
668 | } else { |
669 | afs_strcat(msgbuf, |
670 | afs_cv2string(&tbuffer[CVBS12], delta)); |
671 | afs_strcat(msgbuf, " seconds (via "); |
672 | print_internet_address(msgbuf, sa, ").", 0); |
673 | } |
674 | } else { |
675 | strcpy(msgbuf, "afs: setting clock ahead "); |
676 | afs_strcat(msgbuf, |
677 | afs_cv2string(&tbuffer[CVBS12], -delta)); |
678 | afs_strcat(msgbuf, " seconds (via "); |
679 | print_internet_address(msgbuf, sa, ").", 0); |
680 | } |
681 | /* We're only going to set it once; why bother looping? */ |
682 | break; |
683 | } |
684 | } |
685 | } |
686 | } |
687 | afs_osi_Free(conntimer, nservers * sizeof(afs_int32)); |
688 | afs_osi_Free(deltas, nservers * sizeof(afs_int32)); |
689 | afs_osi_Free(results, nservers * sizeof(afs_int32)); |
690 | } |
691 | |
692 | void |
693 | CkSrv_GetCaps(struct rx_connection **rxconns, int nconns, int nservers, |
694 | struct afs_conn **conns, struct srvAddr **addrs) |
695 | { |
696 | Capabilities *caps; |
697 | afs_int32 *results; |
698 | afs_int32 i; |
699 | struct server *ts; |
700 | |
701 | caps = afs_osi_Alloc(nservers * sizeof (Capabilities)); |
702 | osi_Assert(caps != NULL)(void)((caps != ((void *)0)) || (osi_AssertFailK( "caps != NULL" , "/home/wollman/openafs/src/afs/afs_server.c", 702), 0)); |
703 | memset(caps, 0, nservers * sizeof(Capabilities)); |
704 | |
705 | results = afs_osi_Alloc(nservers * sizeof (afs_int32)); |
706 | osi_Assert(results != NULL)(void)((results != ((void *)0)) || (osi_AssertFailK( "results != NULL" , "/home/wollman/openafs/src/afs/afs_server.c", 706), 0)); |
707 | |
708 | AFS_GUNLOCK()do { do { if (!(pthread_self() == afs_global_owner)) { osi_Panic ("afs global lock not held"); } } while(0); memset(&afs_global_owner , 0, sizeof(pthread_t)); do{if (!(pthread_mutex_unlock(&afs_global_lock ) == 0)) AssertionFailed("/home/wollman/openafs/src/afs/afs_server.c" , 708);}while(0); } while(0); |
709 | multi_Rx(rxconns,nconns)do { struct multi_handle *multi_h; int multi_i; int multi_i0; afs_int32 multi_error; struct rx_call *multi_call; multi_h = multi_Init(rxconns, nconns); for (multi_i0 = multi_i = 0; ; multi_i = multi_i0 ) |
710 | { |
711 | multi_RXAFS_GetCapabilities(&caps[multi_i])if (multi_h->nextReady == multi_h->firstNotReady && multi_i < multi_h->nConns) { multi_call = multi_h-> calls[multi_i]; if (multi_call) { StartRXAFS_GetCapabilities( multi_call); rx_FlushWrite(multi_call); } multi_i0++; continue ; } if ((multi_i = multi_Select(multi_h)) < 0) break; multi_call = multi_h->calls[multi_i]; multi_error = rx_EndCall(multi_call , EndRXAFS_GetCapabilities(multi_call, &caps[multi_i])); multi_h ->calls[multi_i] = (struct rx_call *) 0; |
712 | results[multi_i] = multi_error; |
713 | } multi_Endmulti_Finalize(multi_h); } while (0); |
714 | AFS_GLOCK()do { do{if (!(pthread_mutex_lock(&afs_global_lock) == 0)) AssertionFailed("/home/wollman/openafs/src/afs/afs_server.c" , 714);}while(0); afs_global_owner = pthread_self(); } while( 0); |
715 | |
716 | for ( i = 0 ; i < nconns ; i++ ) { |
717 | ts = addrs[i]->server; |
718 | if ( !ts ) |
719 | continue; |
720 | ts->capabilities = 0; |
721 | ts->flags |= SCAPS_KNOWN0x400; |
722 | if ( results[i] == RXGEN_OPCODE-455 ) { |
723 | /* Mark server as up - it responded */ |
724 | results[i] = 0; |
725 | continue; |
726 | } |
727 | if ( results[i] >= 0 ) |
728 | /* we currently handle 32-bits of capabilities */ |
729 | if (caps[i].Capabilities_len > 0) { |
730 | ts->capabilities = caps[i].Capabilities_val[0]; |
731 | xdr_freeafs_xdr_free((xdrproc_t)xdr_Capabilities, &caps[i]); |
732 | caps[i].Capabilities_val = NULL((void *)0); |
733 | caps[i].Capabilities_len = 0; |
734 | } |
735 | } |
736 | CkSrv_MarkUpDown(conns, nconns, results); |
737 | |
738 | afs_osi_Free(caps, nservers * sizeof(Capabilities)); |
739 | afs_osi_Free(results, nservers * sizeof(afs_int32)); |
740 | } |
741 | |
742 | /* check down servers (if adown), or running servers (if !adown) */ |
743 | void |
744 | afs_CheckServers(int adown, struct cell *acellp) |
745 | { |
746 | afs_LoopServers(adown?AFS_LS_DOWN1:AFS_LS_UP0, acellp, 1, CkSrv_GetCaps, |
747 | afs_setTime?CkSrv_SetTime:NULL((void *)0)); |
748 | } |
749 | |
750 | /* adown: AFS_LS_UP - check only up |
751 | * AFS_LS_DOWN - check only down. |
752 | * AFS_LS_ALL - check all */ |
753 | void |
754 | afs_LoopServers(int adown, struct cell *acellp, int vlalso, |
755 | void (*func1) (struct rx_connection **rxconns, int nconns, |
756 | int nservers, struct afs_conn **conns, |
757 | struct srvAddr **addrs), |
758 | void (*func2) (struct rx_connection **rxconns, int nconns, |
759 | int nservers, struct afs_conn **conns, |
760 | struct srvAddr **addrs)) |
761 | { |
762 | struct vrequest treq; |
763 | struct server *ts; |
764 | struct srvAddr *sa; |
765 | struct afs_conn *tc = NULL((void *)0); |
766 | afs_int32 i, j; |
767 | afs_int32 code; |
768 | struct unixuser *tu; |
769 | int srvAddrCount; |
770 | struct srvAddr **addrs; |
771 | struct afs_conn **conns; |
772 | int nconns; |
773 | struct rx_connection **rxconns; |
774 | afs_int32 *conntimer, *results; |
775 | |
776 | AFS_STATCNT(afs_CheckServers)((afs_cmstats.callInfo.C_afs_CheckServers)++); |
777 | |
778 | /* |
779 | * No sense in doing the server checks if we are running in disconnected |
780 | * mode |
781 | */ |
782 | if (AFS_IS_DISCONNECTED(afs_is_disconnected)) |
783 | return; |
784 | |
785 | conns = (struct afs_conn **)0; |
786 | rxconns = (struct rx_connection **) 0; |
Value stored to 'rxconns' is never read | |
787 | conntimer = 0; |
788 | nconns = 0; |
789 | |
790 | if ((code = afs_InitReq(&treq, afs_osi_credp))) |
791 | return; |
792 | ObtainReadLock(&afs_xserver)do { ; if (!((&afs_xserver)->excl_locked & 2)) ((& afs_xserver)->readers_reading)++; else Afs_Lock_Obtain(& afs_xserver, 1); (&afs_xserver)->pid_last_reader = (get_user_struct ()->u_procp->p_pid ); } while (0); /* Necessary? */ |
793 | ObtainReadLock(&afs_xsrvAddr)do { ; if (!((&afs_xsrvAddr)->excl_locked & 2)) (( &afs_xsrvAddr)->readers_reading)++; else Afs_Lock_Obtain (&afs_xsrvAddr, 1); (&afs_xsrvAddr)->pid_last_reader = (get_user_struct()->u_procp->p_pid ); } while (0); |
794 | |
795 | srvAddrCount = 0; |
796 | for (i = 0; i < NSERVERS16; i++) { |
797 | for (sa = afs_srvAddrs[i]; sa; sa = sa->next_bkt) { |
798 | srvAddrCount++; |
799 | } |
800 | } |
801 | |
802 | addrs = afs_osi_Alloc(srvAddrCount * sizeof(*addrs)); |
803 | osi_Assert(addrs != NULL)(void)((addrs != ((void *)0)) || (osi_AssertFailK( "addrs != NULL" , "/home/wollman/openafs/src/afs/afs_server.c", 803), 0)); |
804 | j = 0; |
805 | for (i = 0; i < NSERVERS16; i++) { |
806 | for (sa = afs_srvAddrs[i]; sa; sa = sa->next_bkt) { |
807 | if (j >= srvAddrCount) |
808 | break; |
809 | addrs[j++] = sa; |
810 | } |
811 | } |
812 | |
813 | ReleaseReadLock(&afs_xsrvAddr)do { ; if (!(--((&afs_xsrvAddr)->readers_reading)) && (&afs_xsrvAddr)->wait_states) Afs_Lock_ReleaseW(& afs_xsrvAddr) ; if ( (&afs_xsrvAddr)->pid_last_reader == (get_user_struct()->u_procp->p_pid ) ) (&afs_xsrvAddr )->pid_last_reader =0; } while (0); |
814 | ReleaseReadLock(&afs_xserver)do { ; if (!(--((&afs_xserver)->readers_reading)) && (&afs_xserver)->wait_states) Afs_Lock_ReleaseW(&afs_xserver ) ; if ( (&afs_xserver)->pid_last_reader == (get_user_struct ()->u_procp->p_pid ) ) (&afs_xserver)->pid_last_reader =0; } while (0); |
815 | |
816 | conns = afs_osi_Alloc(j * sizeof(struct afs_conn *)); |
817 | osi_Assert(conns != NULL)(void)((conns != ((void *)0)) || (osi_AssertFailK( "conns != NULL" , "/home/wollman/openafs/src/afs/afs_server.c", 817), 0)); |
818 | rxconns = afs_osi_Alloc(j * sizeof(struct rx_connection *)); |
819 | osi_Assert(rxconns != NULL)(void)((rxconns != ((void *)0)) || (osi_AssertFailK( "rxconns != NULL" , "/home/wollman/openafs/src/afs/afs_server.c", 819), 0)); |
820 | conntimer = afs_osi_Alloc(j * sizeof (afs_int32)); |
821 | osi_Assert(conntimer != NULL)(void)((conntimer != ((void *)0)) || (osi_AssertFailK( "conntimer != NULL" , "/home/wollman/openafs/src/afs/afs_server.c", 821), 0)); |
822 | results = afs_osi_Alloc(j * sizeof (afs_int32)); |
823 | osi_Assert(results != NULL)(void)((results != ((void *)0)) || (osi_AssertFailK( "results != NULL" , "/home/wollman/openafs/src/afs/afs_server.c", 823), 0)); |
824 | |
825 | for (i = 0; i < j; i++) { |
826 | struct rx_connection *rxconn; |
827 | sa = addrs[i]; |
828 | ts = sa->server; |
829 | if (!ts) |
830 | continue; |
831 | |
832 | /* See if a cell to check was specified. If it is spec'd and not |
833 | * this server's cell, just skip the server. |
834 | */ |
835 | if (acellp && acellp != ts->cell) |
836 | continue; |
837 | |
838 | if (((adown==AFS_LS_DOWN1) && !(sa->sa_flags & SRVADDR_ISDOWN0x20)) |
839 | || ((adown==AFS_LS_UP0) && (sa->sa_flags & SRVADDR_ISDOWN0x20))) |
840 | continue; |
841 | |
842 | /* check vlserver with special code */ |
843 | if (sa->sa_portal == AFS_VLPORT((unsigned short) (__builtin_constant_p(7003) ? (__uint16_t)( ((__uint16_t)(7003)) << 8 | ((__uint16_t)(7003)) >> 8) : __bswap16_var(7003)))) { |
844 | if (vlalso) |
845 | CheckVLServer(sa, &treq); |
846 | continue; |
847 | } |
848 | |
849 | if (!ts->cell) /* not really an active server, anyway, it must */ |
850 | continue; /* have just been added by setsprefs */ |
851 | |
852 | /* get a connection, even if host is down; bumps conn ref count */ |
853 | tu = afs_GetUser(treq.uid, ts->cell->cellNum, SHARED_LOCK4); |
854 | tc = afs_ConnBySA(sa, ts->cell->fsport, ts->cell->cellNum, tu, |
855 | 1 /*force */ , 1 /*create */ , SHARED_LOCK4, &rxconn); |
856 | afs_PutUser(tu, SHARED_LOCK4); |
857 | if (!tc) |
858 | continue; |
859 | |
860 | if ((sa->sa_flags & SRVADDR_ISDOWN0x20) || afs_HaveCallBacksFrom(sa->server) |
861 | || (tc->parent->srvr->server == afs_setTimeHost)) { |
862 | conns[nconns]=tc; |
863 | rxconns[nconns]=rxconn; |
864 | if (sa->sa_flags & SRVADDR_ISDOWN0x20) { |
865 | rx_SetConnDeadTime(rxconn, 3); |
866 | conntimer[nconns]=1; |
867 | } else { |
868 | conntimer[nconns]=0; |
869 | } |
870 | nconns++; |
871 | } |
872 | } /* Outer loop over addrs */ |
873 | |
874 | (*func1)(rxconns, nconns, j, conns, addrs); |
875 | |
876 | if (func2) { |
877 | (*func2)(rxconns, nconns, j, conns, addrs); |
878 | } |
879 | |
880 | for (i = 0; i < nconns; i++) { |
881 | if (conntimer[i] == 1) |
882 | rx_SetConnDeadTime(rxconns[i], afs_rx_deadtime); |
883 | afs_PutConn(conns[i], rxconns[i], SHARED_LOCK4); /* done with it now */ |
884 | } |
885 | |
886 | afs_osi_Free(addrs, srvAddrCount * sizeof(*addrs)); |
887 | afs_osi_Free(conns, j * sizeof(struct afs_conn *)); |
888 | afs_osi_Free(rxconns, j * sizeof(struct rx_connection *)); |
889 | afs_osi_Free(conntimer, j * sizeof(afs_int32)); |
890 | afs_osi_Free(results, j * sizeof(afs_int32)); |
891 | |
892 | } /*afs_CheckServers*/ |
893 | |
894 | |
895 | /* find a server structure given the host address */ |
896 | struct server * |
897 | afs_FindServer(afs_int32 aserver, afs_uint16 aport, afsUUID * uuidp, |
898 | afs_int32 locktype) |
899 | { |
900 | struct server *ts; |
901 | struct srvAddr *sa; |
902 | int i; |
903 | |
904 | AFS_STATCNT(afs_FindServer)((afs_cmstats.callInfo.C_afs_FindServer)++); |
905 | if (uuidp) { |
906 | i = afs_uuid_hash(uuidp) % NSERVERS16; |
907 | for (ts = afs_servers[i]; ts; ts = ts->next) { |
908 | if ((ts->flags & SRVR_MULTIHOMED0x40) |
909 | && |
910 | (memcmp((char *)uuidp, (char *)&ts->sr_uuid_suid._srvUuid.suuid, sizeof(*uuidp)) |
911 | == 0) && (!ts->addr || (ts->addr->sa_portal == aport))) |
912 | return ts; |
913 | } |
914 | } else { |
915 | i = SHash(aserver)(((__builtin_constant_p(aserver) ? ((((__uint32_t)(aserver)) >> 24) | ((((__uint32_t)(aserver)) & (0xff << 16)) >> 8) | ((((__uint32_t)(aserver)) & (0xff << 8)) << 8) | (((__uint32_t)(aserver)) << 24)) : __bswap32_var( aserver))) & (16 -1)); |
916 | for (sa = afs_srvAddrs[i]; sa; sa = sa->next_bkt) { |
917 | if ((sa->sa_ip == aserver) && (sa->sa_portal == aport)) { |
918 | return sa->server; |
919 | } |
920 | } |
921 | } |
922 | return NULL((void *)0); |
923 | |
924 | } /*afs_FindServer */ |
925 | |
926 | |
927 | /* some code for creating new server structs and setting preferences follows |
928 | * in the next few lines... |
929 | */ |
930 | |
931 | #define MAXDEFRANK60000 60000 |
932 | #define DEFRANK40000 40000 |
933 | |
934 | /* Random number generator and constants from KnuthV2 2d ed, p170 */ |
935 | |
936 | /* Rules: |
937 | X = (aX + c) % m |
938 | m is a power of two |
939 | a % 8 is 5 |
940 | a is 0.73m should be 0.01m .. 0.99m |
941 | c is more or less immaterial. 1 or a is suggested. |
942 | |
943 | NB: LOW ORDER BITS are not very random. To get small random numbers, |
944 | treat result as <1, with implied binary point, and multiply by |
945 | desired modulus. |
946 | NB: Has to be unsigned, since shifts on signed quantities may preserve |
947 | the sign bit. |
948 | */ |
949 | /* added rxi_getaddr() to try to get as much initial randomness as |
950 | possible, since at least one customer reboots ALL their clients |
951 | simultaneously -- so osi_Time is bound to be the same on some of the |
952 | clients. This is probably OK, but I don't want to see too much of it. |
953 | */ |
954 | |
955 | #define ranstage(x)(x)= (afs_uint32) (3141592621U*((afs_uint32)x)+1) (x)= (afs_uint32) (3141592621U*((afs_uint32)x)+1) |
956 | |
957 | unsigned int |
958 | afs_random(void) |
959 | { |
960 | static afs_int32 state = 0; |
961 | int i; |
962 | |
963 | AFS_STATCNT(afs_random)((afs_cmstats.callInfo.C_afs_random)++); |
964 | if (!state) { |
965 | osi_timeval_t t; |
966 | osi_GetTime(&t); |
967 | /* |
968 | * 0xfffffff0 was changed to (~0 << 4) since it works no matter how many |
969 | * bits are in a tv_usec |
970 | */ |
971 | state = (t.tv_usec & (~0 << 4)) + (rxi_getaddr() & 0xff); |
972 | state += (t.tv_sec & 0xff); |
973 | for (i = 0; i < 30; i++) { |
974 | ranstage(state)(state)= (afs_uint32) (3141592621U*((afs_uint32)state)+1); |
975 | } |
976 | } |
977 | |
978 | ranstage(state)(state)= (afs_uint32) (3141592621U*((afs_uint32)state)+1); |
979 | return (state); |
980 | |
981 | } /*afs_random */ |
982 | |
983 | /* returns int 0..14 using the high bits of a pseudo-random number instead of |
984 | the low bits, as the low bits are "less random" than the high ones... |
   slight roundoff error exists, an exercise for the reader.
986 | need to multiply by something with lots of ones in it, so multiply by |
987 | 8 or 16 is right out. |
988 | */ |
989 | int |
990 | afs_randomMod15(void) |
991 | { |
992 | afs_uint32 temp; |
993 | |
994 | temp = afs_random() >> 4; |
995 | temp = (temp * 15) >> 28; |
996 | |
997 | return temp; |
998 | } |
999 | |
1000 | int |
1001 | afs_randomMod127(void) |
1002 | { |
1003 | afs_uint32 temp; |
1004 | |
1005 | temp = afs_random() >> 7; |
1006 | temp = (temp * 127) >> 25; |
1007 | |
1008 | return temp; |
1009 | } |
1010 | |
1011 | /* afs_SortOneServer() |
1012 | * Sort all of the srvAddrs, of a server struct, by rank from low to high. |
1013 | */ |
1014 | void |
1015 | afs_SortOneServer(struct server *asp) |
1016 | { |
1017 | struct srvAddr **rootsa, *lowsa, *tsa, *lowprev; |
1018 | int lowrank, rank; |
1019 | |
1020 | for (rootsa = &(asp->addr); *rootsa; rootsa = &(lowsa->next_sa)) { |
1021 | lowprev = NULL((void *)0); |
1022 | lowsa = *rootsa; /* lowest sa is the first one */ |
1023 | lowrank = lowsa->sa_iprank; |
1024 | |
1025 | for (tsa = *rootsa; tsa->next_sa; tsa = tsa->next_sa) { |
1026 | rank = tsa->next_sa->sa_iprank; |
1027 | if (rank < lowrank) { |
1028 | lowprev = tsa; |
1029 | lowsa = tsa->next_sa; |
1030 | lowrank = lowsa->sa_iprank; |
1031 | } |
1032 | } |
1033 | if (lowprev) { /* found one lower, so rearrange them */ |
1034 | lowprev->next_sa = lowsa->next_sa; |
1035 | lowsa->next_sa = *rootsa; |
1036 | *rootsa = lowsa; |
1037 | } |
1038 | } |
1039 | } |
1040 | |
1041 | /* afs_SortServer() |
1042 | * Sort the pointer to servers by the server's rank (its lowest rank). |
1043 | * It is assumed that the server already has its IP addrs sorted (the |
1044 | * first being its lowest rank: afs_GetServer() calls afs_SortOneServer()). |
1045 | */ |
1046 | void |
1047 | afs_SortServers(struct server *aservers[], int count) |
1048 | { |
1049 | struct server *ts; |
1050 | int i, j, low; |
1051 | |
1052 | AFS_STATCNT(afs_SortServers)((afs_cmstats.callInfo.C_afs_SortServers)++); |
1053 | |
1054 | for (i = 0; i < count; i++) { |
1055 | if (!aservers[i]) |
1056 | break; |
1057 | for (low = i, j = i + 1; j <= count; j++) { |
1058 | if ((!aservers[j]) || (!aservers[j]->addr)) |
1059 | break; |
1060 | if ((!aservers[low]) || (!aservers[low]->addr)) |
1061 | break; |
1062 | if (aservers[j]->addr->sa_iprank < aservers[low]->addr->sa_iprank) { |
1063 | low = j; |
1064 | } |
1065 | } |
1066 | if (low != i) { |
1067 | ts = aservers[i]; |
1068 | aservers[i] = aservers[low]; |
1069 | aservers[low] = ts; |
1070 | } |
1071 | } |
1072 | } /*afs_SortServers */ |
1073 | |
1074 | /* afs_SetServerPrefs is rather system-dependent. It pokes around in kernel |
1075 | data structures to determine what the local IP addresses and subnet masks |
1076 | are in order to choose which server(s) are on the local subnet. |
1077 | |
1078 | As I see it, there are several cases: |
1079 | 1. The server address is one of this host's local addresses. In this case |
1080 | this server is to be preferred over all others. |
   2. The server is on the same subnet as one of this host's local
1082 | addresses. (ie, an odd-sized subnet, not class A,B,orC) |
1083 | 3. The server is on the same net as this host (class A,B or C) |
1084 | 4. The server is on a different logical subnet or net than this host, but |
1085 | this host is a 'metric 0 gateway' to it. Ie, two address-spaces share |
1086 | one physical medium. |
1087 | 5. This host has a direct (point-to-point, ie, PPP or SLIP) link to the |
1088 | server. |
1089 | 6. This host and the server are disjoint. |
1090 | |
1091 | That is a rough order of preference. If a point-to-point link has a high |
1092 | metric, I'm assuming that it is a very slow link, and putting it at the |
1093 | bottom of the list (at least until RX works better over slow links). If |
1094 | its metric is 1, I'm assuming that it's relatively fast (T1) and putting |
1095 | it ahead of #6. |
1096 | It's not easy to check for case #4, so I'm ignoring it for the time being. |
1097 | |
1098 | BSD "if" code keeps track of some rough network statistics (cf 'netstat -i') |
1099 | That could be used to prefer certain servers fairly easily. Maybe some |
1100 | other time... |
1101 | |
1102 | NOTE: this code is very system-dependent, and very dependent on the TCP/IP |
1103 | protocols (well, addresses that are stored in uint32s, at any rate). |
1104 | */ |
1105 | |
1106 | #define IA_DST(ia)((struct sockaddr_in *)(&((struct usr_in_ifaddr *)ia)-> ia_dstaddr))((struct sockaddr_in *)(&((struct in_ifaddrusr_in_ifaddr *)ia)->ia_dstaddr)) |
1107 | #define IA_BROAD(ia)((struct sockaddr_in *)(&((struct usr_in_ifaddr *)ia)-> ia_broadaddr))((struct sockaddr_in *)(&((struct in_ifaddrusr_in_ifaddr *)ia)->ia_broadaddr)) |
1108 | |
1109 | /* SA2ULONG takes a sockaddr_in, not a sockaddr (same thing, just cast it!) */ |
1110 | #define SA2ULONG(sa)((sa)->sin_addr.s_addr) ((sa)->sin_addr.s_addr) |
1111 | #define TOPR 5000 |
1112 | #define HI 20000 |
1113 | #define MED 30000 |
1114 | #define LO DEFRANK40000 |
1115 | #define PPWEIGHT 4096 |
1116 | |
1117 | #define USEIFADDR |
1118 | |
1119 | #ifdef AFS_USERSPACE_IP_ADDR1 |
1120 | #ifndef afs_min |
1121 | #define afs_min(A,B)((A)<(B)) ? (A) : (B) ((A)<(B)) ? (A) : (B) |
1122 | #endif |
1123 | /* |
1124 | * The IP addresses and ranks are determined by afsd (in user space) and |
1125 | * passed into the kernel at startup time through the AFSOP_ADVISEADDR |
1126 | * system call. These are stored in the data structure |
1127 | * called 'afs_cb_interface'. |
1128 | * |
1129 | * struct srvAddr *sa; remote server |
1130 | * afs_int32 addr; one of my local addr in net order |
1131 | * afs_uint32 subnetmask; subnet mask of local addr in net order |
1132 | * |
1133 | */ |
1134 | void |
1135 | afsi_SetServerIPRank(struct srvAddr *sa, afs_int32 addr, |
1136 | afs_uint32 subnetmask) |
1137 | { |
1138 | afs_uint32 myAddr, myNet, mySubnet, netMask; |
1139 | afs_uint32 serverAddr; |
1140 | |
1141 | myAddr = ntohl(addr)(__builtin_constant_p(addr) ? ((((__uint32_t)(addr)) >> 24) | ((((__uint32_t)(addr)) & (0xff << 16)) >> 8) | ((((__uint32_t)(addr)) & (0xff << 8)) << 8) | (((__uint32_t)(addr)) << 24)) : __bswap32_var(addr )); /* one of my IP addr in host order */ |
1142 | serverAddr = ntohl(sa->sa_ip)(__builtin_constant_p(sa->sa_ip) ? ((((__uint32_t)(sa-> sa_ip)) >> 24) | ((((__uint32_t)(sa->sa_ip)) & ( 0xff << 16)) >> 8) | ((((__uint32_t)(sa->sa_ip )) & (0xff << 8)) << 8) | (((__uint32_t)(sa-> sa_ip)) << 24)) : __bswap32_var(sa->sa_ip)); /* server's IP addr in host order */ |
1143 | subnetmask = ntohl(subnetmask)(__builtin_constant_p(subnetmask) ? ((((__uint32_t)(subnetmask )) >> 24) | ((((__uint32_t)(subnetmask)) & (0xff << 16)) >> 8) | ((((__uint32_t)(subnetmask)) & (0xff << 8)) << 8) | (((__uint32_t)(subnetmask)) << 24)) : __bswap32_var(subnetmask)); /* subnet mask in host order */ |
1144 | |
1145 | if (IN_CLASSA(myAddr)(((u_int32_t)(myAddr) & 0x80000000) == 0)) |
1146 | netMask = IN_CLASSA_NET0xff000000; |
1147 | else if (IN_CLASSB(myAddr)(((u_int32_t)(myAddr) & 0xc0000000) == 0x80000000)) |
1148 | netMask = IN_CLASSB_NET0xffff0000; |
1149 | else if (IN_CLASSC(myAddr)(((u_int32_t)(myAddr) & 0xe0000000) == 0xc0000000)) |
1150 | netMask = IN_CLASSC_NET0xffffff00; |
1151 | else |
1152 | netMask = 0; |
1153 | |
1154 | myNet = myAddr & netMask; |
1155 | mySubnet = myAddr & subnetmask; |
1156 | |
1157 | if ((serverAddr & netMask) == myNet) { |
1158 | if ((serverAddr & subnetmask) == mySubnet) { |
1159 | if (serverAddr == myAddr) { /* same machine */ |
1160 | sa->sa_iprank = afs_min(sa->sa_iprank, TOPR)((sa->sa_iprank)<(TOPR)) ? (sa->sa_iprank) : (TOPR); |
1161 | } else { /* same subnet */ |
1162 | sa->sa_iprank = afs_min(sa->sa_iprank, HI)((sa->sa_iprank)<(HI)) ? (sa->sa_iprank) : (HI); |
1163 | } |
1164 | } else { /* same net */ |
1165 | sa->sa_iprank = afs_min(sa->sa_iprank, MED)((sa->sa_iprank)<(MED)) ? (sa->sa_iprank) : (MED); |
1166 | } |
1167 | } |
1168 | return; |
1169 | } |
1170 | #else /* AFS_USERSPACE_IP_ADDR */ |
1171 | #if (! defined(AFS_SUN5_ENV)) && (! defined(AFS_DARWIN_ENV)) && (! defined(AFS_OBSD47_ENV)) && defined(USEIFADDR) |
1172 | void |
1173 | afsi_SetServerIPRank(struct srvAddr *sa, struct in_ifaddrusr_in_ifaddr *ifa) |
1174 | { |
1175 | struct sockaddr_in *sin; |
1176 | int t; |
1177 | |
1178 | if ((ntohl(sa->sa_ip)(__builtin_constant_p(sa->sa_ip) ? ((((__uint32_t)(sa-> sa_ip)) >> 24) | ((((__uint32_t)(sa->sa_ip)) & ( 0xff << 16)) >> 8) | ((((__uint32_t)(sa->sa_ip )) & (0xff << 8)) << 8) | (((__uint32_t)(sa-> sa_ip)) << 24)) : __bswap32_var(sa->sa_ip)) & ifa->ia_netmask) == ifa->ia_net) { |
1179 | if ((ntohl(sa->sa_ip)(__builtin_constant_p(sa->sa_ip) ? ((((__uint32_t)(sa-> sa_ip)) >> 24) | ((((__uint32_t)(sa->sa_ip)) & ( 0xff << 16)) >> 8) | ((((__uint32_t)(sa->sa_ip )) & (0xff << 8)) << 8) | (((__uint32_t)(sa-> sa_ip)) << 24)) : __bswap32_var(sa->sa_ip)) & ifa->ia_subnetmask) == ifa->ia_subnet) { |
1180 | sin = IA_SIN(ifa)(&(ifa)->ia_addr); |
1181 | if (SA2ULONG(sin)((sin)->sin_addr.s_addr) == ntohl(sa->sa_ip)(__builtin_constant_p(sa->sa_ip) ? ((((__uint32_t)(sa-> sa_ip)) >> 24) | ((((__uint32_t)(sa->sa_ip)) & ( 0xff << 16)) >> 8) | ((((__uint32_t)(sa->sa_ip )) & (0xff << 8)) << 8) | (((__uint32_t)(sa-> sa_ip)) << 24)) : __bswap32_var(sa->sa_ip))) { /* ie, ME!!! */ |
1182 | sa->sa_iprank = TOPR; |
1183 | } else { |
1184 | t = HI + ifa->ia_ifp->if_metric; /* case #2 */ |
1185 | if (sa->sa_iprank > t) |
1186 | sa->sa_iprank = t; |
1187 | } |
1188 | } else { |
1189 | t = MED + ifa->ia_ifp->if_metric; /* case #3 */ |
1190 | if (sa->sa_iprank > t) |
1191 | sa->sa_iprank = t; |
1192 | } |
1193 | } |
1194 | #ifdef IFF_POINTTOPOINT |
1195 | /* check for case #4 -- point-to-point link */ |
1196 | if ((ifa->ia_ifp->if_flags & IFF_POINTOPOINT0x10) |
1197 | && (SA2ULONG(IA_DST(ifa))((((struct sockaddr_in *)(&((struct usr_in_ifaddr *)ifa)-> ia_dstaddr)))->sin_addr.s_addr) == ntohl(sa->sa_ip)(__builtin_constant_p(sa->sa_ip) ? ((((__uint32_t)(sa-> sa_ip)) >> 24) | ((((__uint32_t)(sa->sa_ip)) & ( 0xff << 16)) >> 8) | ((((__uint32_t)(sa->sa_ip )) & (0xff << 8)) << 8) | (((__uint32_t)(sa-> sa_ip)) << 24)) : __bswap32_var(sa->sa_ip)))) { |
1198 | if (ifa->ia_ifp->if_metric >= (MAXDEFRANK60000 - MED) / PPWEIGHT) |
1199 | t = MAXDEFRANK60000; |
1200 | else |
1201 | t = MED + (PPWEIGHT << ifa->ia_ifp->if_metric); |
1202 | if (sa->sa_iprank > t) |
1203 | sa->sa_iprank = t; |
1204 | } |
1205 | #endif /* IFF_POINTTOPOINT */ |
1206 | } |
1207 | #endif /*(!defined(AFS_SUN5_ENV)) && defined(USEIFADDR) */ |
1208 | #if (defined(AFS_DARWIN_ENV) || defined(AFS_OBSD47_ENV)) && defined(USEIFADDR) |
1209 | #ifndef afs_min |
1210 | #define afs_min(A,B)((A)<(B)) ? (A) : (B) ((A)<(B)) ? (A) : (B) |
1211 | #endif |
1212 | void |
1213 | afsi_SetServerIPRank(struct srvAddr *sa, rx_ifaddr_tstruct usr_ifaddr * ifa) |
1214 | { |
1215 | struct sockaddr sout; |
1216 | struct sockaddr_in *sin; |
1217 | int t; |
1218 | |
1219 | afs_uint32 subnetmask, myAddr, myNet, myDstaddr, mySubnet, netMask; |
1220 | afs_uint32 serverAddr; |
1221 | |
1222 | if (rx_ifaddr_address_family(ifa)(ifa)->ifa_addr->sa_family != AF_INET2) |
1223 | return; |
1224 | t = rx_ifaddr_address(ifa, &sout, sizeof(sout))memcpy(&sout, (ifa)->ifa_addr, sizeof(sout)); |
1225 | if (t != 0) { |
1226 | sin = (struct sockaddr_in *)&sout; |
1227 | myAddr = ntohl(sin->sin_addr.s_addr)(__builtin_constant_p(sin->sin_addr.s_addr) ? ((((__uint32_t )(sin->sin_addr.s_addr)) >> 24) | ((((__uint32_t)(sin ->sin_addr.s_addr)) & (0xff << 16)) >> 8) | ((((__uint32_t)(sin->sin_addr.s_addr)) & (0xff << 8)) << 8) | (((__uint32_t)(sin->sin_addr.s_addr)) << 24)) : __bswap32_var(sin->sin_addr.s_addr)); /* one of my IP addr in host order */ |
1228 | } else { |
1229 | myAddr = 0; |
1230 | } |
1231 | serverAddr = ntohl(sa->sa_ip)(__builtin_constant_p(sa->sa_ip) ? ((((__uint32_t)(sa-> sa_ip)) >> 24) | ((((__uint32_t)(sa->sa_ip)) & ( 0xff << 16)) >> 8) | ((((__uint32_t)(sa->sa_ip )) & (0xff << 8)) << 8) | (((__uint32_t)(sa-> sa_ip)) << 24)) : __bswap32_var(sa->sa_ip)); /* server's IP addr in host order */ |
1232 | t = rx_ifaddr_netmask(ifa, &sout, sizeof(sout))memcpy(&sout, (ifa)->ifa_netmask, sizeof(sout)); |
1233 | if (t != 0) { |
1234 | sin = (struct sockaddr_in *)&sout; |
1235 | subnetmask = ntohl(sin->sin_addr.s_addr)(__builtin_constant_p(sin->sin_addr.s_addr) ? ((((__uint32_t )(sin->sin_addr.s_addr)) >> 24) | ((((__uint32_t)(sin ->sin_addr.s_addr)) & (0xff << 16)) >> 8) | ((((__uint32_t)(sin->sin_addr.s_addr)) & (0xff << 8)) << 8) | (((__uint32_t)(sin->sin_addr.s_addr)) << 24)) : __bswap32_var(sin->sin_addr.s_addr)); /* subnet mask in host order */ |
1236 | } else { |
1237 | subnetmask = 0; |
1238 | } |
1239 | t = rx_ifaddr_dstaddress(ifa, &sout, sizeof(sout))memcpy(&sout, (ifa)->ifa_dstaddr, sizeof(sout)); |
1240 | if (t != 0) { |
1241 | sin = (struct sockaddr_in *)&sout; |
1242 | myDstaddr = ntohl(sin->sin_addr.s_addr)(__builtin_constant_p(sin->sin_addr.s_addr) ? ((((__uint32_t )(sin->sin_addr.s_addr)) >> 24) | ((((__uint32_t)(sin ->sin_addr.s_addr)) & (0xff << 16)) >> 8) | ((((__uint32_t)(sin->sin_addr.s_addr)) & (0xff << 8)) << 8) | (((__uint32_t)(sin->sin_addr.s_addr)) << 24)) : __bswap32_var(sin->sin_addr.s_addr)); |
1243 | } else { |
1244 | myDstaddr = 0; |
1245 | } |
1246 | |
1247 | if (IN_CLASSA(myAddr)(((u_int32_t)(myAddr) & 0x80000000) == 0)) |
1248 | netMask = IN_CLASSA_NET0xff000000; |
1249 | else if (IN_CLASSB(myAddr)(((u_int32_t)(myAddr) & 0xc0000000) == 0x80000000)) |
1250 | netMask = IN_CLASSB_NET0xffff0000; |
1251 | else if (IN_CLASSC(myAddr)(((u_int32_t)(myAddr) & 0xe0000000) == 0xc0000000)) |
1252 | netMask = IN_CLASSC_NET0xffffff00; |
1253 | else |
1254 | netMask = 0; |
1255 | |
1256 | myNet = myAddr & netMask; |
1257 | mySubnet = myAddr & subnetmask; |
1258 | |
1259 | if ((serverAddr & netMask) == myNet) { |
1260 | if ((serverAddr & subnetmask) == mySubnet) { |
1261 | if (serverAddr == myAddr) { /* same machine */ |
1262 | sa->sa_iprank = afs_min(sa->sa_iprank, TOPR)((sa->sa_iprank)<(TOPR)) ? (sa->sa_iprank) : (TOPR); |
1263 | } else { /* same subnet */ |
1264 | sa->sa_iprank = afs_min(sa->sa_iprank, HI + rx_ifnet_metric(rx_ifaddr_ifnet(ifa)))((sa->sa_iprank)<(HI + ((ifa?(ifa)->ifa_ifp:0)?((ifa ?(ifa)->ifa_ifp:0))->if_data.ifi_metric:0))) ? (sa-> sa_iprank) : (HI + ((ifa?(ifa)->ifa_ifp:0)?((ifa?(ifa)-> ifa_ifp:0))->if_data.ifi_metric:0)); |
1265 | } |
1266 | } else { /* same net */ |
1267 | sa->sa_iprank = afs_min(sa->sa_iprank, MED + rx_ifnet_metric(rx_ifaddr_ifnet(ifa)))((sa->sa_iprank)<(MED + ((ifa?(ifa)->ifa_ifp:0)?((ifa ?(ifa)->ifa_ifp:0))->if_data.ifi_metric:0))) ? (sa-> sa_iprank) : (MED + ((ifa?(ifa)->ifa_ifp:0)?((ifa?(ifa)-> ifa_ifp:0))->if_data.ifi_metric:0)); |
1268 | } |
1269 | } |
1270 | #ifdef IFF_POINTTOPOINT |
1271 | /* check for case #4 -- point-to-point link */ |
1272 | if ((rx_ifnet_flags(rx_ifaddr_ifnet(ifa))((ifa?(ifa)->ifa_ifp:0)?((ifa?(ifa)->ifa_ifp:0))->if_flags :0) & IFF_POINTOPOINT0x10) |
1273 | && (myDstaddr == serverAddr)) { |
1274 | if (rx_ifnet_metric(rx_ifaddr_ifnet(ifa))((ifa?(ifa)->ifa_ifp:0)?((ifa?(ifa)->ifa_ifp:0))->if_data .ifi_metric:0) >= (MAXDEFRANK60000 - MED) / PPWEIGHT) |
1275 | t = MAXDEFRANK60000; |
1276 | else |
1277 | t = MED + (PPWEIGHT << rx_ifnet_metric(rx_ifaddr_ifnet(ifa))((ifa?(ifa)->ifa_ifp:0)?((ifa?(ifa)->ifa_ifp:0))->if_data .ifi_metric:0)); |
1278 | if (sa->sa_iprank > t) |
1279 | sa->sa_iprank = t; |
1280 | } |
1281 | #endif /* IFF_POINTTOPOINT */ |
1282 | } |
1283 | #endif /*(!defined(AFS_SUN5_ENV)) && defined(USEIFADDR) */ |
1284 | #endif /* else AFS_USERSPACE_IP_ADDR */ |
1285 | |
1286 | #ifdef AFS_SGI62_ENV |
/* Hash-table enumeration callback (SGI): rank the srvAddr passed in arg1
 * against the interface address record at the head of each bucket. */
static int
afsi_enum_set_rank(struct hashbucket *h, caddr_t mkey, caddr_t arg1,
		   caddr_t arg2)
{
    afsi_SetServerIPRank((struct srvAddr *)arg1, (struct in_ifaddr *)h);
    return 0;			/* Never match, so we enumerate everyone */
}
1294 | #endif /* AFS_SGI62_ENV */ |
1295 | static int |
1296 | afs_SetServerPrefs(struct srvAddr *sa) |
1297 | { |
1298 | #if defined(AFS_USERSPACE_IP_ADDR1) |
1299 | int i; |
1300 | |
1301 | sa->sa_iprank = LO; |
1302 | for (i = 0; i < afs_cb_interface.numberOfInterfaces; i++) { |
1303 | afsi_SetServerIPRank(sa, afs_cb_interface.addr_in[i], |
1304 | afs_cb_interface.subnetmask[i]); |
1305 | } |
1306 | #else /* AFS_USERSPACE_IP_ADDR */ |
1307 | #if defined(AFS_SUN5_ENV) |
1308 | #ifdef AFS_SUN510_ENV |
1309 | int i = 0; |
1310 | #else |
1311 | extern struct ill_s *ill_g_headp; |
1312 | long *addr = (long *)ill_g_headp; |
1313 | ill_t *ill; |
1314 | ipif_t *ipif; |
1315 | #endif |
1316 | int subnet, subnetmask, net, netmask; |
1317 | |
1318 | if (sa) |
1319 | sa->sa_iprank = 0; |
1320 | #ifdef AFS_SUN510_ENV |
1321 | rw_enter(&afsifinfo_lock, RW_READER); |
1322 | |
1323 | for (i = 0; (afsifinfo[i].ipaddr != NULL((void *)0)) && (i < ADDRSPERSITE16); i++) { |
1324 | |
1325 | if (IN_CLASSA(afsifinfo[i].ipaddr)(((u_int32_t)(afsifinfo[i].ipaddr) & 0x80000000) == 0)) { |
1326 | netmask = IN_CLASSA_NET0xff000000; |
1327 | } else if (IN_CLASSB(afsifinfo[i].ipaddr)(((u_int32_t)(afsifinfo[i].ipaddr) & 0xc0000000) == 0x80000000 )) { |
1328 | netmask = IN_CLASSB_NET0xffff0000; |
1329 | } else if (IN_CLASSC(afsifinfo[i].ipaddr)(((u_int32_t)(afsifinfo[i].ipaddr) & 0xe0000000) == 0xc0000000 )) { |
1330 | netmask = IN_CLASSC_NET0xffffff00; |
1331 | } else { |
1332 | netmask = 0; |
1333 | } |
1334 | net = afsifinfo[i].ipaddr & netmask; |
1335 | |
1336 | #ifdef notdef |
1337 | if (!s) { |
1338 | if (!rx_IsLoopbackAddr(afsifinfo[i].ipaddr)) { /* ignore loopback */ |
1339 | *cnt += 1; |
1340 | if (*cnt > 16) |
1341 | return; |
1342 | *addrp++ = afsifinfo[i].ipaddr; |
1343 | } |
1344 | } else |
1345 | #endif /* notdef */ |
1346 | { |
1347 | /* XXXXXX Do the individual ip ranking below XXXXX */ |
1348 | if ((sa->sa_ip & netmask) == net) { |
1349 | if ((sa->sa_ip & subnetmask) == subnet) { |
1350 | if (afsifinfo[i].ipaddr == sa->sa_ip) { /* ie, ME! */ |
1351 | sa->sa_iprank = TOPR; |
1352 | } else { |
1353 | sa->sa_iprank = HI + afsifinfo[i].metric; /* case #2 */ |
1354 | } |
1355 | } else { |
1356 | sa->sa_iprank = MED + afsifinfo[i].metric; /* case #3 */ |
1357 | } |
1358 | } else { |
1359 | sa->sa_iprank = LO + afsifinfo[i].metric; /* case #4 */ |
1360 | } |
1361 | /* check for case #5 -- point-to-point link */ |
1362 | if ((afsifinfo[i].flags & IFF_POINTOPOINT0x10) |
1363 | && (afsifinfo[i].dstaddr == sa->sa_ip)) { |
1364 | |
1365 | if (afsifinfo[i].metric >= (MAXDEFRANK60000 - MED) / PPWEIGHT) |
1366 | sa->sa_iprank = MAXDEFRANK60000; |
1367 | else |
1368 | sa->sa_iprank = MED + (PPWEIGHT << afsifinfo[i].metric); |
1369 | } |
1370 | } |
1371 | } |
1372 | |
1373 | rw_exit(&afsifinfo_lock); |
1374 | #else |
1375 | for (ill = (struct ill_s *)*addr /*ill_g_headp */ ; ill; |
1376 | ill = ill->ill_next) { |
1377 | /* Make sure this is an IPv4 ILL */ |
1378 | if (ill->ill_isv6) |
1379 | continue; |
1380 | for (ipif = ill->ill_ipif; ipif; ipif = ipif->ipif_next) { |
1381 | subnet = ipif->ipif_local_addr & ipif->ipif_net_mask; |
1382 | subnetmask = ipif->ipif_net_mask; |
1383 | /* |
1384 | * Generate the local net using the local address and |
1385 | * whate we know about Class A, B and C networks. |
1386 | */ |
1387 | if (IN_CLASSA(ipif->ipif_local_addr)(((u_int32_t)(ipif->ipif_local_addr) & 0x80000000) == 0 )) { |
1388 | netmask = IN_CLASSA_NET0xff000000; |
1389 | } else if (IN_CLASSB(ipif->ipif_local_addr)(((u_int32_t)(ipif->ipif_local_addr) & 0xc0000000) == 0x80000000 )) { |
1390 | netmask = IN_CLASSB_NET0xffff0000; |
1391 | } else if (IN_CLASSC(ipif->ipif_local_addr)(((u_int32_t)(ipif->ipif_local_addr) & 0xe0000000) == 0xc0000000 )) { |
1392 | netmask = IN_CLASSC_NET0xffffff00; |
1393 | } else { |
1394 | netmask = 0; |
1395 | } |
1396 | net = ipif->ipif_local_addr & netmask; |
1397 | #ifdef notdef |
1398 | if (!s) { |
1399 | if (!rx_IsLoopbackAddr(ipif->ipif_local_addr)) { /* ignore loopback */ |
1400 | *cnt += 1; |
1401 | if (*cnt > 16) |
1402 | return; |
1403 | *addrp++ = ipif->ipif_local_addr; |
1404 | } |
1405 | } else |
1406 | #endif /* notdef */ |
1407 | { |
1408 | /* XXXXXX Do the individual ip ranking below XXXXX */ |
1409 | if ((sa->sa_ip & netmask) == net) { |
1410 | if ((sa->sa_ip & subnetmask) == subnet) { |
1411 | if (ipif->ipif_local_addr == sa->sa_ip) { /* ie, ME! */ |
1412 | sa->sa_iprank = TOPR; |
1413 | } else { |
1414 | sa->sa_iprank = HI + ipif->ipif_metric; /* case #2 */ |
1415 | } |
1416 | } else { |
1417 | sa->sa_iprank = MED + ipif->ipif_metric; /* case #3 */ |
1418 | } |
1419 | } else { |
1420 | sa->sa_iprank = LO + ipif->ipif_metric; /* case #4 */ |
1421 | } |
1422 | /* check for case #5 -- point-to-point link */ |
1423 | if ((ipif->ipif_flags & IFF_POINTOPOINT0x10) |
1424 | && (ipif->ipif_pp_dst_addr == sa->sa_ip)) { |
1425 | |
1426 | if (ipif->ipif_metric >= (MAXDEFRANK60000 - MED) / PPWEIGHT) |
1427 | sa->sa_iprank = MAXDEFRANK60000; |
1428 | else |
1429 | sa->sa_iprank = MED + (PPWEIGHT << ipif->ipif_metric); |
1430 | } |
1431 | } |
1432 | } |
1433 | } |
1434 | #endif /* AFS_SUN510_ENV */ |
1435 | #else |
1436 | #ifndef USEIFADDR |
1437 | rx_ifnet_tstruct usr_ifnet * ifn = NULL((void *)0); |
1438 | struct in_ifaddrusr_in_ifaddr *ifad = (struct in_ifaddrusr_in_ifaddr *)0; |
1439 | struct sockaddr_in *sin; |
1440 | |
1441 | if (!sa) { |
1442 | #ifdef notdef /* clean up, remove this */ |
1443 | for (ifn = ifnetusr_ifnet; ifn != NULL((void *)0); ifn = ifn->if_next) { |
1444 | for (ifad = ifn->if_addrlist; ifad != NULL((void *)0); ifad = ifad->ifa_next) { |
1445 | if ((IFADDR2SA(ifad)->sa_family == AF_INET2) |
1446 | && !(ifn->if_flags & IFF_LOOPBACK0x8)) { |
1447 | *cnt += 1; |
1448 | if (*cnt > 16) |
1449 | return; |
1450 | *addrp++ = |
1451 | ((struct sockaddr_in *)IFADDR2SA(ifad))->sin_addr. |
1452 | s_addr; |
1453 | } |
1454 | }} |
1455 | #endif /* notdef */ |
1456 | return; |
1457 | } |
1458 | sa->sa_iprank = 0; |
1459 | #ifdef ADAPT_MTU |
1460 | ifn = rxi_FindIfnet(sa->sa_ip, &ifad); |
1461 | #endif |
1462 | if (ifn) { /* local, more or less */ |
1463 | #ifdef IFF_LOOPBACK0x8 |
1464 | if (ifn->if_flags & IFF_LOOPBACK0x8) { |
1465 | sa->sa_iprank = TOPR; |
1466 | goto end; |
1467 | } |
1468 | #endif /* IFF_LOOPBACK */ |
1469 | sin = (struct sockaddr_in *)IA_SIN(ifad)(&(ifad)->ia_addr); |
1470 | if (SA2ULONG(sin)((sin)->sin_addr.s_addr) == sa->sa_ip) { |
1471 | sa->sa_iprank = TOPR; |
1472 | goto end; |
1473 | } |
1474 | #ifdef IFF_BROADCAST0x2 |
1475 | if (ifn->if_flags & IFF_BROADCAST0x2) { |
1476 | if (sa->sa_ip == (sa->sa_ip & SA2ULONG(IA_BROAD(ifad))((((struct sockaddr_in *)(&((struct usr_in_ifaddr *)ifad) ->ia_broadaddr)))->sin_addr.s_addr))) { |
1477 | sa->sa_iprank = HI; |
1478 | goto end; |
1479 | } |
1480 | } |
1481 | #endif /* IFF_BROADCAST */ |
1482 | #ifdef IFF_POINTOPOINT0x10 |
1483 | if (ifn->if_flags & IFF_POINTOPOINT0x10) { |
1484 | if (sa->sa_ip == SA2ULONG(IA_DST(ifad))((((struct sockaddr_in *)(&((struct usr_in_ifaddr *)ifad) ->ia_dstaddr)))->sin_addr.s_addr)) { |
1485 | if (ifn->if_metric > 4) { |
1486 | sa->sa_iprank = LO; |
1487 | goto end; |
1488 | } else |
1489 | sa->sa_iprank = ifn->if_metric; |
1490 | } |
1491 | } |
1492 | #endif /* IFF_POINTOPOINT */ |
1493 | sa->sa_iprank += MED + ifn->if_metric; /* couldn't find anything better */ |
1494 | } |
1495 | #else /* USEIFADDR */ |
1496 | |
1497 | if (sa) |
1498 | sa->sa_iprank = LO; |
1499 | #ifdef AFS_SGI62_ENV |
1500 | (void)hash_enum(&hashinfo_inaddr, afsi_enum_set_rank, HTF_INET, NULL((void *)0), |
1501 | (caddr_t) sa, NULL((void *)0)); |
1502 | #elif defined(AFS_DARWIN80_ENV) |
1503 | { |
1504 | errno_t t; |
1505 | unsigned int count; |
1506 | int cnt=0, m, j; |
1507 | rx_ifaddr_tstruct usr_ifaddr * *ifads; |
1508 | rx_ifnet_tstruct usr_ifnet * *ifns; |
1509 | |
1510 | if (!ifnet_list_get(AF_INET2, &ifns, &count)) { |
1511 | for (m = 0; m < count; m++) { |
1512 | if (!ifnet_get_address_list(ifns[m], &ifads)) { |
1513 | for (j = 0; ifads[j] != NULL((void *)0) && cnt < ADDRSPERSITE16; j++) { |
1514 | afsi_SetServerIPRank(sa, ifads[j]); |
1515 | cnt++; |
1516 | } |
1517 | ifnet_free_address_list(ifads); |
1518 | } |
1519 | } |
1520 | ifnet_list_free(ifns); |
1521 | } |
1522 | } |
1523 | #elif defined(AFS_DARWIN_ENV) |
1524 | { |
1525 | rx_ifnet_tstruct usr_ifnet * ifn; |
1526 | rx_ifaddr_tstruct usr_ifaddr * ifa; |
1527 | TAILQ_FOREACH(ifn, &ifnet, if_link)for ((ifn) = (((&usr_ifnet))->tqh_first); (ifn); (ifn) = (((ifn))->if_link.tqe_next)) { |
1528 | TAILQ_FOREACH(ifa, &ifn->if_addrhead, ifa_link)for ((ifa) = (((&ifn->if_addrhead))->tqh_first); (ifa ); (ifa) = (((ifa))->ifa_link.tqe_next)) { |
1529 | afsi_SetServerIPRank(sa, ifa); |
1530 | }}} |
1531 | #elif defined(AFS_FBSD_ENV) |
1532 | { |
1533 | struct in_ifaddrusr_in_ifaddr *ifa; |
1534 | #if defined(AFS_FBSD80_ENV) |
1535 | TAILQ_FOREACH(ifa, &V_in_ifaddrhead, ia_link)for ((ifa) = (((&V_in_ifaddrhead))->tqh_first); (ifa); (ifa) = (((ifa))->ia_link.tqe_next)) { |
1536 | #else |
1537 | TAILQ_FOREACH(ifa, &in_ifaddrhead, ia_link)for ((ifa) = (((&in_ifaddrhead))->tqh_first); (ifa); ( ifa) = (((ifa))->ia_link.tqe_next)) { |
1538 | #endif |
1539 | afsi_SetServerIPRank(sa, ifa); |
1540 | }} |
1541 | #elif defined(AFS_OBSD_ENV) |
1542 | { |
1543 | extern struct in_ifaddrhead in_ifaddrusr_in_ifaddr; |
1544 | struct in_ifaddrusr_in_ifaddr *ifa; |
1545 | for (ifa = in_ifaddrusr_in_ifaddr.tqh_first; ifa; ifa = ifa->ia_list.tqe_next) |
1546 | afsi_SetServerIPRank(sa, ifa); |
1547 | } |
1548 | #elif defined(AFS_NBSD40_ENV) |
1549 | { |
1550 | extern struct in_ifaddrhead in_ifaddrhead; |
1551 | struct in_ifaddrusr_in_ifaddr *ifa; |
1552 | for (ifa = in_ifaddrhead.tqh_first; ifa; ifa = ifa->ia_list.tqe_next) |
1553 | afsi_SetServerIPRank(sa, ifa); |
1554 | } |
1555 | #else |
1556 | { |
1557 | struct in_ifaddrusr_in_ifaddr *ifa; |
1558 | for (ifa = in_ifaddrusr_in_ifaddr; ifa; ifa = ifa->ia_next) { |
1559 | afsi_SetServerIPRank(sa, ifa); |
1560 | }} |
1561 | #endif |
1562 | #endif /* USEIFADDR */ |
1563 | #ifndef USEIFADDR |
1564 | end: |
1565 | #endif |
1566 | #endif /* AFS_SUN5_ENV */ |
1567 | #endif /* else AFS_USERSPACE_IP_ADDR */ |
1568 | if (sa) |
1569 | sa->sa_iprank += afs_randomMod15(); |
1570 | |
1571 | return 0; |
1572 | } /* afs_SetServerPrefs */ |
1573 | |
1574 | #undef TOPR |
1575 | #undef HI |
1576 | #undef MED |
1577 | #undef LO |
1578 | #undef PPWEIGHT |
1579 | |
1580 | /* afs_FlushServer() |
1581 | * The addresses on this server struct have changed in some way; clean |
1582 | * up all other structures that may reference it (volumes, callbacks, |
1583 | * cell lists, and -- if it has no addresses left -- the struct itself). |
1584 | * The afs_xserver and afs_xsrvAddr locks are assumed taken. |
1585 | */ |
1585 | void |
1586 | afs_FlushServer(struct server *srvp) |
1587 | { |
1588 | afs_int32 i; |
1589 | struct server *ts, **pts; |
1590 | |
1591 | /* Find any volumes residing on this server and flush their state */ |
1592 | afs_ResetVolumes(srvp); |
1593 | |
1594 | /* Flush all callbacks in all the vcaches for this specific server */ |
1595 | afs_FlushServerCBs(srvp); |
1596 | |
1597 | /* Remove all the callback structs queued on this server */ |
1598 | if (srvp->cbrs) { |
1599 | struct afs_cbr *cb, *cbnext; |
1600 | |
1601 | ObtainWriteLock(&afs_xvcb, 300)do { ; if (!(&afs_xvcb)->excl_locked && !(& afs_xvcb)->readers_reading) (&afs_xvcb) -> excl_locked = 2; else Afs_Lock_Obtain(&afs_xvcb, 2); (&afs_xvcb) ->pid_writer = (get_user_struct()->u_procp->p_pid ); (&afs_xvcb)->src_indicator = 300; } while (0); |
1602 | for (cb = srvp->cbrs; cb; cb = cbnext) { |
1603 | cbnext = cb->next; |
1604 | afs_FreeCBR(cb); |
1605 | } srvp->cbrs = (struct afs_cbr *)0; |
1606 | ReleaseWriteLock(&afs_xvcb)do { ; (&afs_xvcb)->excl_locked &= ~2; if ((&afs_xvcb )->wait_states) Afs_Lock_ReleaseR(&afs_xvcb); (&afs_xvcb )->pid_writer=0; } while (0); |
1607 | } |
1608 | |
1609 | /* If no more srvAddr structs hanging off of this server struct, |
1610 | * then clean it up. |
1611 | */ |
1612 | if (!srvp->addr) { |
1613 | /* Remove the server structure from the cell list - if there */ |
1614 | afs_RemoveCellEntry(srvp); |
1615 | |
1616 | /* Remove from the afs_servers hash chain: scan every bucket, |
     | * keeping pts pointing at the link that leads to ts so the |
     | * unlink below is a single pointer store. */ |
1617 | for (i = 0; i < NSERVERS16; i++) { |
1618 | for (pts = &(afs_servers[i]), ts = *pts; ts; |
1619 | pts = &(ts->next), ts = *pts) { |
1620 | if (ts == srvp) |
1621 | break; |
1622 | } |
1623 | if (ts) |
1624 | break; |
1625 | } |
1626 | if (ts) { |
1627 | *pts = ts->next; /* Found it. Remove it */ |
1628 | afs_osi_Free(ts, sizeof(struct server)); /* Free it */ |
1629 | afs_totalServers--; |
1630 | } |
1631 | } |
1632 | } |
1633 | |
1634 | /* afs_RemoveSrvAddr() |
1635 | * This removes a SrvAddr structure from its server structure. |
1636 | * The srvAddr struct is not free'd because its connections may still |
1637 | * be open to it. It is up to the calling process to make sure it |
1638 | * remains connected to a server struct. |
1639 | * The afs_xserver and afs_xsrvAddr locks are assumed taken. |
1640 | * It is not removed from the afs_srvAddrs hash chain. |
1641 | */ |
1642 | void |
1643 | afs_RemoveSrvAddr(struct srvAddr *sap) |
1644 | { |
1645 | struct srvAddr **psa, *sa; |
1646 | struct server *srv; |
1647 | |
1648 | if (!sap) |
1649 | return; |
1650 | srv = sap->server; |
1651 | |
1652 | /* Find the srvAddr in the server's list and remove it */ |
1653 | for (psa = &(srv->addr), sa = *psa; sa; psa = &(sa->next_sa), sa = *psa) { |
1654 | if (sa == sap) |
1655 | break; |
1656 | } if (sa) { |
1657 | *psa = sa->next_sa; |
1658 | sa->next_sa = 0; |
1659 | sa->server = 0; |
1660 | |
1661 | /* Flush the server struct since its IP address has changed; this |
     | * may also free srv entirely if it now has no addresses left. */ |
1662 | afs_FlushServer(srv); |
1663 | } |
1664 | } |
1665 | |
1666 | /* afs_GetCapabilities |
1667 | * Try and retrieve capabilities of a given file server. Carps on actual |
1668 | * failure. Servers are not expected to support this RPC. |
     | * Called with afs_xserver write-held; the lock is dropped around the |
     | * RPC itself (see Release/Obtain pair below) and re-taken afterward. */ |
1669 | void |
1670 | afs_GetCapabilities(struct server *ts) |
1671 | { |
1672 | Capabilities caps = {0, NULL((void *)0)}; |
1673 | struct vrequest treq; |
1674 | struct afs_conn *tc; |
1675 | struct unixuser *tu; |
1676 | struct rx_connection *rxconn; |
1677 | afs_int32 code; |
1678 | |
     | /* Bail quietly if we have no cell info or no credentials to use. */ |
1679 | if ( !ts || !ts->cell ) |
1680 | return; |
1681 | if ( !afs_osi_credp ) |
1682 | return; |
1683 | |
1684 | if ((code = afs_InitReq(&treq, afs_osi_credp))) |
1685 | return; |
1686 | tu = afs_GetUser(treq.uid, ts->cell->cellNum, SHARED_LOCK4); |
1687 | if ( !tu ) |
1688 | return; |
1689 | tc = afs_ConnBySA(ts->addr, ts->cell->fsport, ts->cell->cellNum, tu, 0, 1, |
1690 | SHARED_LOCK4, |
1691 | &rxconn); |
1692 | afs_PutUser(tu, SHARED_LOCK4); |
1693 | if ( !tc ) |
1694 | return; |
1695 | /* InitCallBackStateN, triggered by our RPC, may need this */ |
1696 | ReleaseWriteLock(&afs_xserver)do { ; (&afs_xserver)->excl_locked &= ~2; if ((& afs_xserver)->wait_states) Afs_Lock_ReleaseR(&afs_xserver ); (&afs_xserver)->pid_writer=0; } while (0); |
1697 | code = RXAFS_GetCapabilities(rxconn, &caps); |
1698 | ObtainWriteLock(&afs_xserver, 723)do { ; if (!(&afs_xserver)->excl_locked && !(& afs_xserver)->readers_reading) (&afs_xserver) -> excl_locked = 2; else Afs_Lock_Obtain(&afs_xserver, 2); (&afs_xserver )->pid_writer = (get_user_struct()->u_procp->p_pid ) ; (&afs_xserver)->src_indicator = 723; } while (0); |
1699 | /* we forced a conn above; important we mark it down if needed. |
     | * RXGEN_OPCODE just means the server doesn't implement this RPC, |
     | * which is an expected outcome, not a down server. */ |
1700 | if ((code < 0) && (code != RXGEN_OPCODE-455)) { |
1701 | afs_ServerDown(tc->parent->srvr); |
1702 | ForceNewConnections(tc->parent->srvr); /* multi homed clients */ |
1703 | } |
1704 | afs_PutConn(tc, rxconn, SHARED_LOCK4); |
1705 | if ( code && code != RXGEN_OPCODE-455 ) { |
1706 | afs_warn("RXAFS_GetCapabilities failed with code %d\n", code); |
1707 | /* better not be anything to free. we failed! */ |
1708 | return; |
1709 | } |
1710 | |
     | /* Even an RXGEN_OPCODE reply tells us the server's capabilities |
     | * (i.e. none), so record that we now know them. */ |
1711 | ts->flags |= SCAPS_KNOWN0x400; |
1712 | |
1713 | if ( caps.Capabilities_len > 0 ) { |
1714 | ts->capabilities = caps.Capabilities_val[0]; |
1715 | xdr_freeafs_xdr_free((xdrproc_t)xdr_Capabilities, &caps); |
1716 | caps.Capabilities_len = 0; |
1717 | caps.Capabilities_val = NULL((void *)0); |
1718 | } |
1719 | |
1720 | } |
1721 | |
1722 | /* afs_GetServer() |
1723 | * Return an updated and properly initialized server structure |
1724 | * corresponding to the server ID, cell, and port specified. |
1725 | * If one does not exist, then one will be created. |
1726 | * aserver and aport must be in NET byte order. |
1727 | */ |
1728 | struct server * |
1729 | afs_GetServer(afs_uint32 * aserverp, afs_int32 nservers, afs_int32 acell, |
1730 | u_short aport, afs_int32 locktype, afsUUID * uuidp, |
1731 | afs_int32 addr_uniquifier) |
1732 | { |
1733 | struct server *oldts = 0, *ts, *newts, *orphts = 0; |
1734 | struct srvAddr *oldsa, *newsa, *nextsa, *orphsa; |
1735 | afs_int32 iphash, k, srvcount = 0; |
1736 | unsigned int srvhash; |
1737 | |
1738 | AFS_STATCNT(afs_GetServer)((afs_cmstats.callInfo.C_afs_GetServer)++); |
1739 | |
1740 | ObtainSharedLock(&afs_xserver, 13)do { ; if (!(&afs_xserver)->excl_locked) (&afs_xserver ) -> excl_locked = 4; else Afs_Lock_Obtain(&afs_xserver , 4); (&afs_xserver)->pid_writer = (get_user_struct()-> u_procp->p_pid ); (&afs_xserver)->src_indicator = 13 ; } while (0); |
1741 | |
1742 | /* Check if the server struct exists and is up to date */ |
1743 | if (!uuidp) { |
1744 | if (nservers != 1) |
1745 | panic("afs_GetServer: incorect count of servers")do{fprintf(__stderrp, "%s", "afs_GetServer: incorect count of servers" );do{if (!(0)) AssertionFailed("/home/wollman/openafs/src/afs/afs_server.c" , 1745);}while(0);}while(0); |
1746 | ObtainReadLock(&afs_xsrvAddr)do { ; if (!((&afs_xsrvAddr)->excl_locked & 2)) (( &afs_xsrvAddr)->readers_reading)++; else Afs_Lock_Obtain (&afs_xsrvAddr, 1); (&afs_xsrvAddr)->pid_last_reader = (get_user_struct()->u_procp->p_pid ); } while (0); |
1747 | ts = afs_FindServer(aserverp[0], aport, NULL((void *)0), locktype); |
1748 | ReleaseReadLock(&afs_xsrvAddr)do { ; if (!(--((&afs_xsrvAddr)->readers_reading)) && (&afs_xsrvAddr)->wait_states) Afs_Lock_ReleaseW(& afs_xsrvAddr) ; if ( (&afs_xsrvAddr)->pid_last_reader == (get_user_struct()->u_procp->p_pid ) ) (&afs_xsrvAddr )->pid_last_reader =0; } while (0); |
1749 | if (ts && !(ts->flags & SRVR_MULTIHOMED0x40)) { |
1750 | /* Found a server struct that is not multihomed and has the |
1751 | * IP address associated with it. A correct match. |
1752 | */ |
1753 | ReleaseSharedLock(&afs_xserver)do { ; (&afs_xserver)->excl_locked &= ~(4 | 2); if ((&afs_xserver)->wait_states) Afs_Lock_ReleaseR(& afs_xserver); (&afs_xserver)->pid_writer=0; } while (0 ); |
1754 | return (ts); |
1755 | } |
1756 | } else { |
1757 | if (nservers <= 0) |
1758 | panic("afs_GetServer: incorrect count of servers")do{fprintf(__stderrp, "%s", "afs_GetServer: incorrect count of servers" );do{if (!(0)) AssertionFailed("/home/wollman/openafs/src/afs/afs_server.c" , 1758);}while(0);}while(0); |
1759 | ts = afs_FindServer(0, aport, uuidp, locktype); |
1760 | if (ts && (ts->sr_addr_uniquifier_suid._srvUuid.addr_uniquifier == addr_uniquifier) && ts->addr) { |
1761 | /* Found a server struct that is multihomed and same |
1762 | * uniquifier (same IP addrs). The above if statement is the |
1763 | * same as in InstallUVolumeEntry(). |
1764 | */ |
1765 | ReleaseSharedLock(&afs_xserver)do { ; (&afs_xserver)->excl_locked &= ~(4 | 2); if ((&afs_xserver)->wait_states) Afs_Lock_ReleaseR(& afs_xserver); (&afs_xserver)->pid_writer=0; } while (0 ); |
1766 | return ts; |
1767 | } |
1768 | if (ts) |
1769 | oldts = ts; /* Will reuse if same uuid */ |
1770 | } |
1771 | |
     | /* Cache miss or stale entry: take both write locks and rebuild. */ |
1772 | UpgradeSToWLock(&afs_xserver, 36)do { ; if (!(&afs_xserver)->readers_reading) (&afs_xserver )->excl_locked = 2; else Afs_Lock_Obtain(&afs_xserver, 6); (&afs_xserver)->pid_writer = (get_user_struct()-> u_procp->p_pid ); (&afs_xserver)->src_indicator = 36 ; } while (0); |
1773 | ObtainWriteLock(&afs_xsrvAddr, 116)do { ; if (!(&afs_xsrvAddr)->excl_locked && !( &afs_xsrvAddr)->readers_reading) (&afs_xsrvAddr) -> excl_locked = 2; else Afs_Lock_Obtain(&afs_xsrvAddr, 2); (&afs_xsrvAddr)->pid_writer = (get_user_struct()-> u_procp->p_pid ); (&afs_xsrvAddr)->src_indicator = 116 ; } while (0); |
1774 | |
1775 | srvcount = afs_totalServers; |
1776 | |
1777 | /* Reuse/allocate a new server structure */ |
1778 | if (oldts) { |
1779 | newts = oldts; |
1780 | } else { |
1781 | newts = afs_osi_Alloc(sizeof(struct server)); |
1782 | if (!newts) |
1783 | panic("malloc of server struct")do{fprintf(__stderrp, "%s", "malloc of server struct");do{if ( !(0)) AssertionFailed("/home/wollman/openafs/src/afs/afs_server.c" , 1783);}while(0);}while(0); |
1784 | afs_totalServers++; |
1785 | memset(newts, 0, sizeof(struct server)); |
1786 | |
1787 | /* Add the server struct to the afs_servers[] hash chain */ |
1788 | srvhash = |
1789 | (uuidp ? (afs_uuid_hash(uuidp) % NSERVERS16) : SHash(aserverp[0])(((__builtin_constant_p(aserverp[0]) ? ((((__uint32_t)(aserverp [0])) >> 24) | ((((__uint32_t)(aserverp[0])) & (0xff << 16)) >> 8) | ((((__uint32_t)(aserverp[0])) & (0xff << 8)) << 8) | (((__uint32_t)(aserverp[0]) ) << 24)) : __bswap32_var(aserverp[0]))) & (16 -1))); |
1790 | newts->next = afs_servers[srvhash]; |
1791 | afs_servers[srvhash] = newts; |
1792 | } |
1793 | |
1794 | /* Initialize the server structure */ |
1795 | if (uuidp) { /* Multihomed */ |
1796 | newts->sr_uuid_suid._srvUuid.suuid = *uuidp; |
1797 | newts->sr_addr_uniquifier_suid._srvUuid.addr_uniquifier = addr_uniquifier; |
1798 | newts->flags |= SRVR_MULTIHOMED0x40; |
1799 | } |
1800 | if (acell) |
1801 | newts->cell = afs_GetCell(acell, 0); |
1802 | |
1803 | /* For each IP address we are registering */ |
1804 | for (k = 0; k < nservers; k++) { |
1805 | iphash = SHash(aserverp[k])(((__builtin_constant_p(aserverp[k]) ? ((((__uint32_t)(aserverp [k])) >> 24) | ((((__uint32_t)(aserverp[k])) & (0xff << 16)) >> 8) | ((((__uint32_t)(aserverp[k])) & (0xff << 8)) << 8) | (((__uint32_t)(aserverp[k]) ) << 24)) : __bswap32_var(aserverp[k]))) & (16 -1)); |
1806 | |
1807 | /* Check if the srvAddr structure already exists. If so, remove |
1808 | * it from its server structure and add it to the new one. |
1809 | */ |
1810 | for (oldsa = afs_srvAddrs[iphash]; oldsa; oldsa = oldsa->next_bkt) { |
1811 | if ((oldsa->sa_ip == aserverp[k]) && (oldsa->sa_portal == aport)) |
1812 | break; |
1813 | } |
1814 | if (oldsa && (oldsa->server != newts)) { |
1815 | afs_RemoveSrvAddr(oldsa); /* Remove from its server struct */ |
1816 | oldsa->next_sa = newts->addr; /* Add to the new server struct */ |
1817 | newts->addr = oldsa; |
1818 | } |
1819 | |
1820 | /* Reuse/allocate a new srvAddr structure */ |
1821 | if (oldsa) { |
1822 | newsa = oldsa; |
1823 | } else { |
1824 | newsa = afs_osi_Alloc(sizeof(struct srvAddr)); |
1825 | if (!newsa) |
1826 | panic("malloc of srvAddr struct")do{fprintf(__stderrp, "%s", "malloc of srvAddr struct");do{if (!(0)) AssertionFailed("/home/wollman/openafs/src/afs/afs_server.c" , 1826);}while(0);}while(0); |
1827 | afs_totalSrvAddrs++; |
1828 | memset(newsa, 0, sizeof(struct srvAddr)); |
1829 | |
1830 | /* Add the new srvAddr to the afs_srvAddrs[] hash chain */ |
1831 | newsa->next_bkt = afs_srvAddrs[iphash]; |
1832 | afs_srvAddrs[iphash] = newsa; |
1833 | |
1834 | /* Hang off of the server structure */ |
1835 | newsa->next_sa = newts->addr; |
1836 | newts->addr = newsa; |
1837 | |
1838 | /* Initialize the srvAddr Structure */ |
1839 | newsa->sa_ip = aserverp[k]; |
1840 | newsa->sa_portal = aport; |
1841 | } |
1842 | |
1843 | /* Update the srvAddr Structure */ |
1844 | newsa->server = newts; |
1845 | if (newts->flags & SRVR_ISDOWN0x20) |
1846 | newsa->sa_flags |= SRVADDR_ISDOWN0x20; |
1847 | if (uuidp) |
1848 | newsa->sa_flags |= SRVADDR_MH1; |
1849 | else |
1850 | newsa->sa_flags &= ~SRVADDR_MH1; |
1851 | |
1852 | /* Compute preference values and resort */ |
1853 | if (!newsa->sa_iprank) { |
1854 | afs_SetServerPrefs(newsa); /* new server rank */ |
1855 | } |
1856 | } |
1857 | afs_SortOneServer(newts); /* Sort by rank */ |
1858 | |
1859 | /* If we reused the server struct, remove any of its srvAddr |
1860 | * structs that will no longer be associated with this server. |
1861 | */ |
1862 | if (oldts) { /* reused the server struct */ |
1863 | for (orphsa = newts->addr; orphsa; orphsa = nextsa) { |
1864 | nextsa = orphsa->next_sa; |
1865 | for (k = 0; k < nservers; k++) { |
1866 | if (orphsa->sa_ip == aserverp[k]) |
1867 | break; /* belongs */ |
1868 | } |
1869 | if (k < nservers) |
1870 | continue; /* belongs */ |
1871 | |
1872 | /* Have a srvAddr struct. Now get a server struct (if not already) */ |
1873 | if (!orphts) { |
1874 | orphts = afs_osi_Alloc(sizeof(struct server)); |
1875 | if (!orphts) |
1876 | panic("malloc of lo server struct")do{fprintf(__stderrp, "%s", "malloc of lo server struct");do{ if (!(0)) AssertionFailed("/home/wollman/openafs/src/afs/afs_server.c" , 1876);}while(0);}while(0); |
1877 | memset(orphts, 0, sizeof(struct server)); |
1878 | afs_totalServers++; |
1879 | |
1880 | /* Add the orphaned server to the afs_servers[] hash chain. |
1881 | * Its iphash does not matter since we never look up the server |
1882 | * in the afs_servers table by its ip address (only by uuid - |
1883 | * which this has none). |
1884 | */ |
     | /* NOTE(review): k == nservers here (the inner loop above ran to |
     |  * completion), so aserverp[k] reads one element past the |
     |  * caller's array -- an out-of-bounds read. The masked result |
     |  * always yields a valid bucket index, and per the comment above |
     |  * the bucket choice is irrelevant, but the read itself is UB; |
     |  * TODO fix by hashing a value we legitimately have, e.g. |
     |  * orphsa->sa_ip. */ |
1885 | iphash = SHash(aserverp[k])(((__builtin_constant_p(aserverp [k])) >> 24) | ((((__uint32_t)(aserverp[k])) & (0xff << 16)) >> 8) | ((((__uint32_t)(aserverp[k])) & (0xff << 8)) << 8) | (((__uint32_t)(aserverp[k]) ) << 24)) : __bswap32_var(aserverp[k]))) & (16 -1)); |
1886 | orphts->next = afs_servers[iphash]; |
1887 | afs_servers[iphash] = orphts; |
1888 | |
1889 | if (acell) |
1890 | orphts->cell = afs_GetCell(acell, 0); |
1891 | } |
1892 | |
1893 | /* Hang the srvAddr struct off of the server structure. The server |
1894 | * may have multiple srvAddrs, but it won't be marked multihomed. |
1895 | */ |
1896 | afs_RemoveSrvAddr(orphsa); /* remove */ |
1897 | orphsa->next_sa = orphts->addr; /* hang off server struct */ |
1898 | orphts->addr = orphsa; |
1899 | orphsa->server = orphts; |
1900 | orphsa->sa_flags |= SRVADDR_NOUSE0x40; /* flag indicating not in use */ |
1901 | orphsa->sa_flags &= ~SRVADDR_MH1; /* Not multihomed */ |
1902 | } |
1903 | } |
1904 | |
1905 | srvcount = afs_totalServers - srvcount; /* # servers added and removed */ |
1906 | if (srvcount) { |
1907 | struct afs_stats_SrvUpDownInfo *upDownP; |
1908 | /* With the introduction of this new record, we need to adjust the |
1909 | * proper individual & global server up/down info. |
1910 | */ |
1911 | upDownP = GetUpDownStats(newts); |
1912 | upDownP->numTtlRecords += srvcount; |
1913 | afs_stats_cmperf.srvRecords += srvcount; |
1914 | if (afs_stats_cmperf.srvRecords > afs_stats_cmperf.srvRecordsHWM) |
1915 | afs_stats_cmperf.srvRecordsHWM = afs_stats_cmperf.srvRecords; |
1916 | } |
1917 | |
1918 | ReleaseWriteLock(&afs_xsrvAddr)do { ; (&afs_xsrvAddr)->excl_locked &= ~2; if ((& afs_xsrvAddr)->wait_states) Afs_Lock_ReleaseR(&afs_xsrvAddr ); (&afs_xsrvAddr)->pid_writer=0; } while (0); |
1919 | |
1920 | if ( aport == AFS_FSPORT((unsigned short) (__builtin_constant_p(7000) ? (__uint16_t)( ((__uint16_t)(7000)) << 8 | ((__uint16_t)(7000)) >> 8) : __bswap16_var(7000))) && !(newts->flags & SCAPS_KNOWN0x400)) |
1921 | afs_GetCapabilities(newts); |
1922 | |
1923 | ReleaseWriteLock(&afs_xserver)do { ; (&afs_xserver)->excl_locked &= ~2; if ((& afs_xserver)->wait_states) Afs_Lock_ReleaseR(&afs_xserver ); (&afs_xserver)->pid_writer=0; } while (0); |
1924 | return (newts); |
1925 | } /* afs_GetServer */ |
1926 | |
1927 | void |
1928 | afs_ActivateServer(struct srvAddr *sap) |
1929 | { |
     | /* Record the first use ("activation") of sap's server: stamp its |
     |  * activationTime and bump the matching up/down statistics counters |
     |  * exactly once per server record. */ |
1930 | osi_timeval_t currTime; /*Filled with current time */ |
1931 | osi_timeval_t *currTimeP; /*Ptr to above */ |
1932 | struct afs_stats_SrvUpDownInfo *upDownP; /*Ptr to up/down info record */ |
1933 | struct server *aserver = sap->server; |
1934 | |
1935 | if (!(aserver->flags & AFS_SERVER_FLAG_ACTIVATED0x01)) { |
1936 | /* |
1937 | * This server record has not yet been activated. Go for it, |
1938 | * recording its ``birth''. |
1939 | */ |
1940 | aserver->flags |= AFS_SERVER_FLAG_ACTIVATED0x01; |
1941 | currTimeP = &currTime; |
1942 | osi_GetuTime(currTimeP)osi_GetTime(currTimeP); |
1943 | aserver->activationTime = currTime.tv_sec; |
1944 | upDownP = GetUpDownStats(aserver); |
1945 | if (aserver->flags & SRVR_ISDOWN0x20) { |
1946 | upDownP->numDownRecords++; |
1947 | } else { |
1948 | upDownP->numUpRecords++; |
1949 | upDownP->numRecordsNeverDown++; |
1950 | } |
1951 | } |
1952 | } |
1953 | |
1954 | void |
1955 | afs_RemoveAllConns(void) |
1956 | { |
     | /* Walk every server's srvAddr list and release all cached |
     |  * connections. Holds afs_xserver (read) and afs_xconn (write) |
     |  * for the duration. */ |
1957 | int i; |
1958 | struct server *ts, *nts; |
1959 | struct srvAddr *sa; |
1960 | |
1961 | ObtainReadLock(&afs_xserver)do { ; if (!((&afs_xserver)->excl_locked & 2)) ((& afs_xserver)->readers_reading)++; else Afs_Lock_Obtain(& afs_xserver, 1); (&afs_xserver)->pid_last_reader = (get_user_struct ()->u_procp->p_pid ); } while (0); |
1962 | ObtainWriteLock(&afs_xconn, 1001)do { ; if (!(&afs_xconn)->excl_locked && !(& afs_xconn)->readers_reading) (&afs_xconn) -> excl_locked = 2; else Afs_Lock_Obtain(&afs_xconn, 2); (&afs_xconn )->pid_writer = (get_user_struct()->u_procp->p_pid ) ; (&afs_xconn)->src_indicator = 1001; } while (0); |
1963 | |
1964 | /*printf("Destroying connections ... ");*/ |
1965 | for (i = 0; i < NSERVERS16; i++) { |
1966 | for (ts = afs_servers[i]; ts; ts = nts) { |
1967 | nts = ts->next; |
1968 | for (sa = ts->addr; sa; sa = sa->next_sa) { |
1969 | if (sa->conns) { |
1970 | afs_ReleaseConns(sa->conns); |
1971 | sa->conns = NULL((void *)0); |
1972 | } |
1973 | } |
1974 | } |
1975 | } |
1976 | /*printf("done\n");*/ |
1977 | |
1978 | ReleaseWriteLock(&afs_xconn)do { ; (&afs_xconn)->excl_locked &= ~2; if ((& afs_xconn)->wait_states) Afs_Lock_ReleaseR(&afs_xconn) ; (&afs_xconn)->pid_writer=0; } while (0); |
1979 | ReleaseReadLock(&afs_xserver)do { ; if (!(--((&afs_xserver)->readers_reading)) && (&afs_xserver)->wait_states) Afs_Lock_ReleaseW(&afs_xserver ) ; if ( (&afs_xserver)->pid_last_reader == (get_user_struct ()->u_procp->p_pid ) ) (&afs_xserver)->pid_last_reader =0; } while (0); |
1980 | |
1981 | } |
1982 | |
1983 | void |
1984 | afs_MarkAllServersUp(void) |
1985 | { |
     | /* Mark every known server address as up via |
     |  * afs_MarkServerUpOrDown(sa, 0). Holds afs_xserver and |
     |  * afs_xsrvAddr write locks for the duration. */ |
1986 | int i; |
1987 | struct server *ts; |
1988 | struct srvAddr *sa; |
1989 | |
1990 | ObtainWriteLock(&afs_xserver, 721)do { ; if (!(&afs_xserver)->excl_locked && !(& afs_xserver)->readers_reading) (&afs_xserver) -> excl_locked = 2; else Afs_Lock_Obtain(&afs_xserver, 2); (&afs_xserver )->pid_writer = (get_user_struct()->u_procp->p_pid ) ; (&afs_xserver)->src_indicator = 721; } while (0); |
1991 | ObtainWriteLock(&afs_xsrvAddr, 722)do { ; if (!(&afs_xsrvAddr)->excl_locked && !( &afs_xsrvAddr)->readers_reading) (&afs_xsrvAddr) -> excl_locked = 2; else Afs_Lock_Obtain(&afs_xsrvAddr, 2); (&afs_xsrvAddr)->pid_writer = (get_user_struct()-> u_procp->p_pid ); (&afs_xsrvAddr)->src_indicator = 722 ; } while (0); |
1992 | for (i = 0; i< NSERVERS16; i++) { |
1993 | for (ts = afs_servers[i]; ts; ts = ts->next) { |
1994 | for (sa = ts->addr; sa; sa = sa->next_sa) { |
1995 | afs_MarkServerUpOrDown(sa, 0); |
1996 | } |
1997 | } |
1998 | } |
1999 | ReleaseWriteLock(&afs_xsrvAddr)do { ; (&afs_xsrvAddr)->excl_locked &= ~2; if ((& afs_xsrvAddr)->wait_states) Afs_Lock_ReleaseR(&afs_xsrvAddr ); (&afs_xsrvAddr)->pid_writer=0; } while (0); |
2000 | ReleaseWriteLock(&afs_xserver)do { ; (&afs_xserver)->excl_locked &= ~2; if ((& afs_xserver)->wait_states) Afs_Lock_ReleaseR(&afs_xserver ); (&afs_xserver)->pid_writer=0; } while (0); |
2001 | } |