| File: | afs/afs_daemons.c |
| Location: | line 146, column 5 |
| Description: | Value stored to 'last1MinCheck' is never read |
| 1 | /* |
| 2 | * Copyright 2000, International Business Machines Corporation and others. |
| 3 | * All Rights Reserved. |
| 4 | * |
| 5 | * This software has been released under the terms of the IBM Public |
| 6 | * License. For details, see the LICENSE file in the top-level source |
| 7 | * directory or online at http://www.openafs.org/dl/license10.html |
| 8 | */ |
| 9 | |
| 10 | #include <afsconfig.h> |
| 11 | #include "afs/param.h" |
| 12 | |
| 13 | |
| 14 | #ifdef AFS_AIX51_ENV |
| 15 | #define __FULL_PROTO |
| 16 | #include <sys/sleep.h> |
| 17 | #endif |
| 18 | |
| 19 | #include "afs/sysincludes.h" /* Standard vendor system headers */ |
| 20 | #include "afsincludes.h" /* Afs-based standard headers */ |
| 21 | #include "afs/afs_stats.h" /* statistics gathering code */ |
| 22 | #include "afs/afs_cbqueue.h" |
| 23 | #ifdef AFS_AIX_ENV |
| 24 | #include <sys/adspace.h> /* for vm_att(), vm_det() */ |
| 25 | #endif |
| 26 | |
| 27 | #if defined(AFS_CACHE_BYPASS) |
| 28 | #include "afs/afs_bypasscache.h" |
| 29 | #endif// defined(AFS_CACHE_BYPASS) |
| 30 | /* background request queue size */ |
| 31 | afs_lock_t afs_xbrs; /* lock for brs */ |
| 32 | static int brsInit = 0; |
| 33 | short afs_brsWaiters = 0; /* number of users waiting for brs buffers */ |
| 34 | short afs_brsDaemons = 0; /* number of daemons waiting for brs requests */ |
| 35 | struct brequest afs_brs[NBRS15]; /* request structures */ |
| 36 | struct afs_osi_WaitHandle AFS_WaitHandler, AFS_CSWaitHandler; |
| 37 | static int afs_brs_count = 0; /* request counter, to service reqs in order */ |
| 38 | |
| 39 | static int rxepoch_checked = 0; |
| 40 | #define afs_CheckRXEpoch(){if (rxepoch_checked == 0 && rxkad_EpochWasSet) { rxepoch_checked = 1; afs_GCUserData( 1); } } {if (rxepoch_checked == 0 && rxkad_EpochWasSet) { \ |
| 41 | rxepoch_checked = 1; afs_GCUserData(/* force flag */ 1); } } |
| 42 | |
| 43 | /* PAG garbage collection */ |
| 44 | /* We induce a compile error if param.h does not define AFS_GCPAGS */ |
| 45 | afs_int32 afs_gcpags = AFS_GCPAGS0; |
| 46 | afs_int32 afs_gcpags_procsize = 0; |
| 47 | |
| 48 | afs_int32 afs_CheckServerDaemonStarted = 0; |
| 49 | #ifndef DEFAULT_PROBE_INTERVAL30 |
| 50 | #define DEFAULT_PROBE_INTERVAL30 30 /* default to 3 min */ |
| 51 | #endif |
| 52 | afs_int32 afs_probe_interval = DEFAULT_PROBE_INTERVAL30; |
| 53 | afs_int32 afs_probe_all_interval = 600; |
| 54 | afs_int32 afs_nat_probe_interval = 60; |
| 55 | afs_int32 afs_preCache = 0; |
| 56 | |
| 57 | #define PROBE_WAIT()(1000 * (afs_probe_interval - ((afs_random() & 0x7fffffff ) % (afs_probe_interval/2)))) (1000 * (afs_probe_interval - ((afs_random() & 0x7fffffff) \ |
| 58 | % (afs_probe_interval/2)))) |
| 59 | |
| 60 | void |
| 61 | afs_SetCheckServerNATmode(int isnat) |
| 62 | { |
| 63 | static afs_int32 old_intvl, old_all_intvl; |
| 64 | static int wasnat; |
| 65 | |
| 66 | if (isnat && !wasnat) { |
| 67 | old_intvl = afs_probe_interval; |
| 68 | old_all_intvl = afs_probe_all_interval; |
| 69 | afs_probe_interval = afs_nat_probe_interval; |
| 70 | afs_probe_all_interval = afs_nat_probe_interval; |
| 71 | afs_osi_CancelWait(&AFS_CSWaitHandler); |
| 72 | } else if (!isnat && wasnat) { |
| 73 | afs_probe_interval = old_intvl; |
| 74 | afs_probe_all_interval = old_all_intvl; |
| 75 | } |
| 76 | wasnat = isnat; |
| 77 | } |
| 78 | |
| 79 | void |
| 80 | afs_CheckServerDaemon(void) |
| 81 | { |
| 82 | afs_int32 now, delay, lastCheck, last10MinCheck; |
| 83 | |
| 84 | afs_CheckServerDaemonStarted = 1; |
| 85 | |
| 86 | while (afs_initState < 101) |
| 87 | afs_osi_Sleep(&afs_initState); |
| 88 | afs_osi_Wait(PROBE_WAIT()(1000 * (afs_probe_interval - ((afs_random() & 0x7fffffff ) % (afs_probe_interval/2)))), &AFS_CSWaitHandler, 0); |
| 89 | |
| 90 | last10MinCheck = lastCheck = osi_Time()time_second; |
| 91 | while (1) { |
| 92 | if (afs_termState == AFSOP_STOP_CS216) { |
| 93 | afs_termState = AFSOP_STOP_BKG212; |
| 94 | afs_osi_Wakeup(&afs_termState); |
| 95 | break; |
| 96 | } |
| 97 | |
| 98 | now = osi_Time()time_second; |
| 99 | if (afs_probe_interval + lastCheck <= now) { |
| 100 | afs_CheckServers(1, NULL((void *)0)); /* check down servers */ |
| 101 | lastCheck = now = osi_Time()time_second; |
| 102 | } |
| 103 | |
| 104 | if (afs_probe_all_interval + last10MinCheck <= now) { |
| 105 | afs_Trace1(afs_iclSetp, CM_TRACE_PROBEUP, ICL_TYPE_INT32, afs_probe_all_interval)(((afs_iclSetp) && (afs_iclSetp->states & 2)) ? afs_icl_Event1(afs_iclSetp, (701087797L), (1<<24)+((7) <<18), (long)(afs_probe_all_interval)) : 0); |
| 106 | afs_CheckServers(0, NULL((void *)0)); |
| 107 | last10MinCheck = now = osi_Time()time_second; |
| 108 | } |
| 109 | /* shutdown check. */ |
| 110 | if (afs_termState == AFSOP_STOP_CS216) { |
| 111 | afs_termState = AFSOP_STOP_BKG212; |
| 112 | afs_osi_Wakeup(&afs_termState); |
| 113 | break; |
| 114 | } |
| 115 | |
| 116 | /* Compute time to next probe. */ |
| 117 | delay = afs_probe_interval + lastCheck; |
| 118 | if (delay > afs_probe_all_interval + last10MinCheck) |
| 119 | delay = afs_probe_all_interval + last10MinCheck; |
| 120 | delay -= now; |
| 121 | if (delay < 1) |
| 122 | delay = 1; |
| 123 | afs_osi_Wait(delay * 1000, &AFS_CSWaitHandler, 0); |
| 124 | } |
| 125 | afs_CheckServerDaemonStarted = 0; |
| 126 | } |
| 127 | |
| 128 | extern int vfs_context_ref; |
| 129 | |
| 130 | /* This function always holds the GLOCK whilst it is running. The caller |
| 131 | * gets the GLOCK before invoking it, and afs_osi_Sleep drops the GLOCK |
| 132 | * whilst we are sleeping, and regains it when we're woken up. |
| 133 | */ |
| 134 | void |
| 135 | afs_Daemon(void) |
| 136 | { |
| 137 | afs_int32 code; |
| 138 | struct afs_exporter *exporter; |
| 139 | afs_int32 now; |
| 140 | afs_int32 last3MinCheck, last10MinCheck, last60MinCheck, lastNMinCheck; |
| 141 | afs_int32 last1MinCheck, last5MinCheck; |
| 142 | afs_uint32 lastCBSlotBump; |
| 143 | char cs_warned = 0; |
| 144 | |
| 145 | AFS_STATCNT(afs_Daemon)((afs_cmstats.callInfo.C_afs_Daemon)++); |
| 146 | last1MinCheck = last3MinCheck = last60MinCheck = last10MinCheck = |
Value stored to 'last1MinCheck' is never read | |
| 147 | last5MinCheck = lastNMinCheck = 0; |
| 148 | |
| 149 | afs_rootFid.Fid.Volume = 0; |
| 150 | while (afs_initState < 101) |
| 151 | afs_osi_Sleep(&afs_initState); |
| 152 | |
| 153 | #ifdef AFS_DARWIN80_ENV |
| 154 | if (afs_osi_ctxtp_initialized) |
| 155 | osi_Panic("vfs context already initialized"); |
| 156 | while (afs_osi_ctxtp && vfs_context_ref) |
| 157 | afs_osi_Sleep(&afs_osi_ctxtp); |
| 158 | if (afs_osi_ctxtp && !vfs_context_ref) |
| 159 | vfs_context_rele(afs_osi_ctxtp); |
| 160 | afs_osi_ctxtp = vfs_context_create(NULL((void *)0)); |
| 161 | afs_osi_ctxtp_initialized = 1; |
| 162 | #endif |
| 163 | now = osi_Time()time_second; |
| 164 | lastCBSlotBump = now; |
| 165 | |
| 166 | /* when a lot of clients are booted simultaneously, they develop |
| 167 | * annoying synchronous VL server bashing behaviors. So we stagger them. |
| 168 | */ |
| 169 | last1MinCheck = now + ((afs_random() & 0x7fffffff) % 60); /* an extra 30 */ |
| 170 | last3MinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180); |
| 171 | last60MinCheck = now - 1800 + ((afs_random() & 0x7fffffff) % 3600); |
| 172 | last10MinCheck = now - 300 + ((afs_random() & 0x7fffffff) % 600); |
| 173 | last5MinCheck = now - 150 + ((afs_random() & 0x7fffffff) % 300); |
| 174 | lastNMinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180); |
| 175 | |
| 176 | /* start off with afs_initState >= 101 (basic init done) */ |
| 177 | while (1) { |
| 178 | afs_CheckCallbacks(20); /* unstat anything which will expire soon */ |
| 179 | |
| 180 | /* things to do every 20 seconds or less - required by protocol spec */ |
| 181 | if (afs_nfsexporter) |
| 182 | afs_FlushActiveVcaches(0); /* flush NFS writes */ |
| 183 | afs_FlushVCBs(1); /* flush queued callbacks */ |
| 184 | |
| 185 | afs_MaybeWakeupTruncateDaemon(); /* free cache space if have too */ |
| 186 | rx_CheckPackets(); /* Does RX need more packets? */ |
| 187 | |
| 188 | now = osi_Time()time_second; |
| 189 | if (lastCBSlotBump + CBHTSLOTLEN128 < now) { /* pretty time-dependant */ |
| 190 | lastCBSlotBump = now; |
| 191 | if (afs_BumpBase()) { |
| 192 | afs_CheckCallbacks(20); /* unstat anything which will expire soon */ |
| 193 | } |
| 194 | } |
| 195 | |
| 196 | if (last1MinCheck + 60 < now) { |
| 197 | /* things to do every minute */ |
| 198 | DFlush(); /* write out dir buffers */ |
| 199 | afs_WriteThroughDSlots(); /* write through cacheinfo entries */ |
| 200 | ObtainWriteLock(&afs_xvcache, 736)do { ; if (!(&afs_xvcache)->excl_locked && !(& afs_xvcache)->readers_reading) (&afs_xvcache) -> excl_locked = 2; else Afs_Lock_Obtain(&afs_xvcache, 2); (&afs_xvcache )->pid_writer = (((__curthread())->td_proc)->p_pid ) ; (&afs_xvcache)->src_indicator = 736; } while (0); |
| 201 | afs_FlushReclaimedVcaches(); |
| 202 | ReleaseWriteLock(&afs_xvcache)do { ; (&afs_xvcache)->excl_locked &= ~2; if ((& afs_xvcache)->wait_states) Afs_Lock_ReleaseR(&afs_xvcache ); (&afs_xvcache)->pid_writer=0; } while (0); |
| 203 | afs_FlushActiveVcaches(1); /* keep flocks held & flush nfs writes */ |
| 204 | #if 0 |
| 205 | afs_StoreDirtyVcaches(); |
| 206 | #endif |
| 207 | afs_CheckRXEpoch(){if (rxepoch_checked == 0 && rxkad_EpochWasSet) { rxepoch_checked = 1; afs_GCUserData( 1); } }; |
| 208 | last1MinCheck = now; |
| 209 | } |
| 210 | |
| 211 | if (last3MinCheck + 180 < now) { |
| 212 | afs_CheckTokenCache(); /* check for access cache resets due to expired |
| 213 | * tickets */ |
| 214 | last3MinCheck = now; |
| 215 | } |
| 216 | |
| 217 | if (afsd_dynamic_vcaches0 && (last5MinCheck + 300 < now)) { |
| 218 | /* start with trying to drop us back to our base usage */ |
| 219 | int anumber = VCACHE_FREE5 + (afs_vcount - afs_cacheStats); |
| 220 | |
| 221 | if (anumber > 0) { |
| 222 | ObtainWriteLock(&afs_xvcache, 734)do { ; if (!(&afs_xvcache)->excl_locked && !(& afs_xvcache)->readers_reading) (&afs_xvcache) -> excl_locked = 2; else Afs_Lock_Obtain(&afs_xvcache, 2); (&afs_xvcache )->pid_writer = (((__curthread())->td_proc)->p_pid ) ; (&afs_xvcache)->src_indicator = 734; } while (0); |
| 223 | afs_ShakeLooseVCaches(anumber); |
| 224 | ReleaseWriteLock(&afs_xvcache)do { ; (&afs_xvcache)->excl_locked &= ~2; if ((& afs_xvcache)->wait_states) Afs_Lock_ReleaseR(&afs_xvcache ); (&afs_xvcache)->pid_writer=0; } while (0); |
| 225 | } |
| 226 | last5MinCheck = now; |
| 227 | } |
| 228 | |
| 229 | if (!afs_CheckServerDaemonStarted) { |
| 230 | /* Do the check here if the correct afsd is not installed. */ |
| 231 | if (!cs_warned) { |
| 232 | cs_warned = 1; |
| 233 | afs_warn("Please install afsd with check server daemon.\n"); |
| 234 | } |
| 235 | if (lastNMinCheck + afs_probe_interval < now) { |
| 236 | /* only check down servers */ |
| 237 | afs_CheckServers(1, NULL((void *)0)); |
| 238 | lastNMinCheck = now; |
| 239 | } |
| 240 | } |
| 241 | if (last10MinCheck + 600 < now) { |
| 242 | #ifdef AFS_USERSPACE_IP_ADDR |
| 243 | extern int rxi_GetcbiInfo(void); |
| 244 | #endif |
| 245 | afs_Trace1(afs_iclSetp, CM_TRACE_PROBEUP, ICL_TYPE_INT32, 600)(((afs_iclSetp) && (afs_iclSetp->states & 2)) ? afs_icl_Event1(afs_iclSetp, (701087797L), (1<<24)+((7) <<18), (long)(600)) : 0); |
| 246 | #ifdef AFS_USERSPACE_IP_ADDR |
| 247 | if (rxi_GetcbiInfo()) { /* addresses changed from last time */ |
| 248 | afs_FlushCBs(); |
| 249 | } |
| 250 | #else /* AFS_USERSPACE_IP_ADDR */ |
| 251 | if (rxi_GetIFInfo()) { /* addresses changed from last time */ |
| 252 | afs_FlushCBs(); |
| 253 | } |
| 254 | #endif /* else AFS_USERSPACE_IP_ADDR */ |
| 255 | if (!afs_CheckServerDaemonStarted) |
| 256 | afs_CheckServers(0, NULL((void *)0)); |
| 257 | afs_GCUserData(0); /* gc old conns */ |
| 258 | /* This is probably the wrong way of doing GC for the various exporters but it will suffice for a while */ |
| 259 | for (exporter = root_exported; exporter; |
| 260 | exporter = exporter->exp_next) { |
| 261 | (void)EXP_GC(exporter, 0)(*(exporter)->exp_op->export_garbagecollect)(exporter, 0 ); /* Generalize params */ |
| 262 | } |
| 263 | { |
| 264 | static int cnt = 0; |
| 265 | if (++cnt < 12) { |
| 266 | afs_CheckVolumeNames(AFS_VOLCHECK_EXPIRED0x1 | |
| 267 | AFS_VOLCHECK_BUSY0x2); |
| 268 | } else { |
| 269 | cnt = 0; |
| 270 | afs_CheckVolumeNames(AFS_VOLCHECK_EXPIRED0x1 | |
| 271 | AFS_VOLCHECK_BUSY0x2 | |
| 272 | AFS_VOLCHECK_MTPTS0x4); |
| 273 | } |
| 274 | } |
| 275 | last10MinCheck = now; |
| 276 | } |
| 277 | if (last60MinCheck + 3600 < now) { |
| 278 | afs_Trace1(afs_iclSetp, CM_TRACE_PROBEVOLUME, ICL_TYPE_INT32,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ? afs_icl_Event1(afs_iclSetp, (701087798L), (1<<24)+((7) <<18), (long)(3600)) : 0) |
| 279 | 3600)(((afs_iclSetp) && (afs_iclSetp->states & 2)) ? afs_icl_Event1(afs_iclSetp, (701087798L), (1<<24)+((7) <<18), (long)(3600)) : 0); |
| 280 | afs_CheckRootVolume(); |
| 281 | #if AFS_GCPAGS0 |
| 282 | if (afs_gcpags == AFS_GCPAGS_OK) { |
| 283 | afs_int32 didany; |
| 284 | afs_GCPAGs(&didany); |
| 285 | } |
| 286 | #endif |
| 287 | last60MinCheck = now; |
| 288 | } |
| 289 | if (afs_initState < 300) { /* while things ain't rosy */ |
| 290 | code = afs_CheckRootVolume(); |
| 291 | if (code == 0) |
| 292 | afs_initState = 300; /* succeeded */ |
| 293 | if (afs_initState < 200) |
| 294 | afs_initState = 200; /* tried once */ |
| 295 | afs_osi_Wakeup(&afs_initState); |
| 296 | } |
| 297 | |
| 298 | /* 18285 is because we're trying to divide evenly into 128, that is, |
| 299 | * CBSlotLen, while staying just under 20 seconds. If CBSlotLen |
| 300 | * changes, should probably change this interval, too. |
| 301 | * Some of the preceding actions may take quite some time, so we |
| 302 | * might not want to wait the entire interval */ |
| 303 | now = 18285 - (osi_Time()time_second - now); |
| 304 | if (now > 0) { |
| 305 | afs_osi_Wait(now, &AFS_WaitHandler, 0); |
| 306 | } |
| 307 | |
| 308 | if (afs_termState == AFSOP_STOP_AFS211) { |
| 309 | if (afs_CheckServerDaemonStarted) |
| 310 | afs_termState = AFSOP_STOP_CS216; |
| 311 | else |
| 312 | afs_termState = AFSOP_STOP_BKG212; |
| 313 | afs_osi_Wakeup(&afs_termState); |
| 314 | return; |
| 315 | } |
| 316 | } |
| 317 | } |
| 318 | |
| 319 | int |
| 320 | afs_CheckRootVolume(void) |
| 321 | { |
| 322 | char rootVolName[32]; |
| 323 | struct volume *tvp = NULL((void *)0); |
| 324 | int usingDynroot = afs_GetDynrootEnable(); |
| 325 | int localcell; |
| 326 | |
| 327 | AFS_STATCNT(afs_CheckRootVolume)((afs_cmstats.callInfo.C_afs_CheckRootVolume)++); |
| 328 | if (*afs_rootVolumeName == 0) { |
| 329 | strcpy(rootVolName, "root.afs"); |
| 330 | } else { |
| 331 | strcpy(rootVolName, afs_rootVolumeName); |
| 332 | } |
| 333 | |
| 334 | if (usingDynroot) { |
| 335 | afs_GetDynrootFid(&afs_rootFid); |
| 336 | tvp = afs_GetVolume(&afs_rootFid, NULL((void *)0), READ_LOCK1); |
| 337 | } else { |
| 338 | struct cell *lc = afs_GetPrimaryCell(READ_LOCK1); |
| 339 | |
| 340 | if (!lc) |
| 341 | return ENOENT2; |
| 342 | localcell = lc->cellNum; |
| 343 | afs_PutCell(lc, READ_LOCK); |
| 344 | tvp = afs_GetVolumeByName(rootVolName, localcell, 1, NULL((void *)0), READ_LOCK1); |
| 345 | if (!tvp) { |
| 346 | char buf[128]; |
| 347 | int len = strlen(rootVolName); |
| 348 | |
| 349 | if ((len < 9) || strcmp(&rootVolName[len - 9], ".readonly")) { |
| 350 | strcpy(buf, rootVolName); |
| 351 | afs_strcat(buf, ".readonly")strcat((buf), (".readonly")); |
| 352 | tvp = afs_GetVolumeByName(buf, localcell, 1, NULL((void *)0), READ_LOCK1); |
| 353 | } |
| 354 | } |
| 355 | if (tvp) { |
| 356 | int volid = (tvp->roVol ? tvp->roVol : tvp->volume); |
| 357 | afs_rootFid.Cell = localcell; |
| 358 | if (afs_rootFid.Fid.Volume && afs_rootFid.Fid.Volume != volid |
| 359 | && afs_globalVp) { |
| 360 | /* If we had a root fid before and it changed location we reset |
| 361 | * the afs_globalVp so that it will be reevaluated. |
| 362 | * Just decrement the reference count. This only occurs during |
| 363 | * initial cell setup and can panic the machine if we set the |
| 364 | * count to zero and fs checkv is executed when the current |
| 365 | * directory is /afs. |
| 366 | */ |
| 367 | #ifdef AFS_LINUX20_ENV |
| 368 | { |
| 369 | struct vrequest treq; |
| 370 | struct vattr vattr; |
| 371 | cred_t *credp; |
| 372 | struct dentry *dp; |
| 373 | struct vcache *vcp; |
| 374 | |
| 375 | afs_rootFid.Fid.Volume = volid; |
| 376 | afs_rootFid.Fid.Vnode = 1; |
| 377 | afs_rootFid.Fid.Unique = 1; |
| 378 | |
| 379 | credp = crref(); |
| 380 | if (afs_InitReq(&treq, credp)) |
| 381 | goto out; |
| 382 | vcp = afs_GetVCache(&afs_rootFid, &treq, NULL((void *)0), NULL((void *)0)); |
| 383 | if (!vcp) |
| 384 | goto out; |
| 385 | afs_getattr(vcp, &vattr, credp); |
| 386 | afs_fill_inode(AFSTOV(vcp)((vcp)->v), &vattr); |
| 387 | |
| 388 | dp = d_find_alias(AFSTOV(afs_globalVp)((afs_globalVp)->v)); |
| 389 | |
| 390 | #if defined(AFS_LINUX24_ENV) |
| 391 | #if defined(HAVE_DCACHE_LOCK) |
| 392 | spin_lock(&dcache_lock); |
| 393 | #else |
| 394 | spin_lock(&AFSTOV(vcp)((vcp)->v)->i_lock); |
| 395 | #endif |
| 396 | #if defined(AFS_LINUX26_ENV) |
| 397 | spin_lock(&dp->d_lock); |
| 398 | #endif |
| 399 | #endif |
| 400 | list_del_init(&dp->d_alias); |
| 401 | list_add(&dp->d_alias, &(AFSTOV(vcp)((vcp)->v)->i_dentry)); |
| 402 | dp->d_inode = AFSTOV(vcp)((vcp)->v); |
| 403 | #if defined(AFS_LINUX24_ENV) |
| 404 | #if defined(AFS_LINUX26_ENV) |
| 405 | spin_unlock(&dp->d_lock); |
| 406 | #endif |
| 407 | #if defined(HAVE_DCACHE_LOCK) |
| 408 | spin_unlock(&dcache_lock); |
| 409 | #else |
| 410 | spin_unlock(&AFSTOV(vcp)((vcp)->v)->i_lock); |
| 411 | #endif |
| 412 | #endif |
| 413 | dput(dp); |
| 414 | |
| 415 | AFS_FAST_RELE(afs_globalVp)do { do { (void)0; _mtx_unlock_flags(((&afs_global_mtx)), (0), "/home/wollman/openafs/src/afs/afs_daemons.c", 415); } while (0); do { vrele(((afs_globalVp)->v)); } while(0);; do { ( void)0; _mtx_lock_flags(((&afs_global_mtx)), (0), "/home/wollman/openafs/src/afs/afs_daemons.c" , 415); (void)0; } while (0); } while (0); |
| 416 | afs_globalVp = vcp; |
| 417 | out: |
| 418 | crfree(credp); |
| 419 | } |
| 420 | #else |
| 421 | #ifdef AFS_DARWIN80_ENV |
| 422 | afs_PutVCache(afs_globalVp); |
| 423 | #else |
| 424 | AFS_FAST_RELE(afs_globalVp)do { do { (void)0; _mtx_unlock_flags(((&afs_global_mtx)), (0), "/home/wollman/openafs/src/afs/afs_daemons.c", 424); } while (0); do { vrele(((afs_globalVp)->v)); } while(0);; do { ( void)0; _mtx_lock_flags(((&afs_global_mtx)), (0), "/home/wollman/openafs/src/afs/afs_daemons.c" , 424); (void)0; } while (0); } while (0); |
| 425 | #endif |
| 426 | afs_globalVp = 0; |
| 427 | #endif |
| 428 | } |
| 429 | afs_rootFid.Fid.Volume = volid; |
| 430 | afs_rootFid.Fid.Vnode = 1; |
| 431 | afs_rootFid.Fid.Unique = 1; |
| 432 | } |
| 433 | } |
| 434 | if (tvp) { |
| 435 | afs_initState = 300; /* won */ |
| 436 | afs_osi_Wakeup(&afs_initState); |
| 437 | afs_PutVolume(tvp, READ_LOCK)((tvp)->refCount--); |
| 438 | } |
| 439 | if (afs_rootFid.Fid.Volume) |
| 440 | return 0; |
| 441 | else |
| 442 | return ENOENT2; |
| 443 | } |
| 444 | |
| 445 | /* ptr_parm 0 is the pathname, size_parm 0 to the fetch is the chunk number */ |
| 446 | static void |
| 447 | BPath(struct brequest *ab) |
| 448 | { |
| 449 | struct dcache *tdc = NULL((void *)0); |
| 450 | struct vcache *tvc = NULL((void *)0); |
| 451 | struct vnode *tvn = NULL((void *)0); |
| 452 | #ifdef AFS_LINUX22_ENV |
| 453 | struct dentry *dp = NULL((void *)0); |
| 454 | #endif |
| 455 | afs_size_t offset, len; |
| 456 | struct vrequest treq; |
| 457 | afs_int32 code; |
| 458 | |
| 459 | AFS_STATCNT(BPath)((afs_cmstats.callInfo.C_BPath)++); |
| 460 | if ((code = afs_InitReq(&treq, ab->cred))) |
| 461 | return; |
| 462 | AFS_GUNLOCK()do { (void)0; _mtx_unlock_flags(((&afs_global_mtx)), (0), "/home/wollman/openafs/src/afs/afs_daemons.c", 462); } while (0); |
| 463 | #ifdef AFS_LINUX22_ENV |
| 464 | code = gop_lookupnameosi_lookupname((char *)ab->ptr_parm[0], AFS_UIOSYSUIO_SYSSPACE, 1, &dp); |
| 465 | if (dp) |
| 466 | tvn = (struct vnode *)dp->d_inode; |
| 467 | #else |
| 468 | code = gop_lookupnameosi_lookupname((char *)ab->ptr_parm[0], AFS_UIOSYSUIO_SYSSPACE, 1, &tvn); |
| 469 | #endif |
| 470 | AFS_GLOCK()do { (void)0; _mtx_lock_flags(((&afs_global_mtx)), (0), "/home/wollman/openafs/src/afs/afs_daemons.c" , 470); (void)0; } while (0); |
| 471 | osi_FreeLargeSpace((char *)ab->ptr_parm[0]); /* free path name buffer here */ |
| 472 | if (code) |
| 473 | return; |
| 474 | /* now path may not have been in afs, so check that before calling our cache manager */ |
| 475 | if (!tvn || !IsAfsVnode(tvn)((tvn)->v_op == &afs_vnodeops)) { |
| 476 | /* release it and give up */ |
| 477 | if (tvn) { |
| 478 | #ifdef AFS_LINUX22_ENV |
| 479 | dput(dp); |
| 480 | #else |
| 481 | AFS_RELE(tvn)do { do { (void)0; _mtx_unlock_flags(((&afs_global_mtx)), (0), "/home/wollman/openafs/src/afs/afs_daemons.c", 481); } while (0); do { vrele(tvn); } while(0);; do { (void)0; _mtx_lock_flags (((&afs_global_mtx)), (0), "/home/wollman/openafs/src/afs/afs_daemons.c" , 481); (void)0; } while (0); } while (0); |
| 482 | #endif |
| 483 | } |
| 484 | return; |
| 485 | } |
| 486 | tvc = VTOAFS(tvn)((struct vcache *)(tvn)->v_data); |
| 487 | /* here we know its an afs vnode, so we can get the data for the chunk */ |
| 488 | tdc = afs_GetDCache(tvc, ab->size_parm[0], &treq, &offset, &len, 1); |
| 489 | if (tdc) { |
| 490 | afs_PutDCache(tdc); |
| 491 | } |
| 492 | #ifdef AFS_LINUX22_ENV |
| 493 | dput(dp); |
| 494 | #else |
| 495 | AFS_RELE(tvn)do { do { (void)0; _mtx_unlock_flags(((&afs_global_mtx)), (0), "/home/wollman/openafs/src/afs/afs_daemons.c", 495); } while (0); do { vrele(tvn); } while(0);; do { (void)0; _mtx_lock_flags (((&afs_global_mtx)), (0), "/home/wollman/openafs/src/afs/afs_daemons.c" , 495); (void)0; } while (0); } while (0); |
| 496 | #endif |
| 497 | } |
| 498 | |
| 499 | /* size_parm 0 to the fetch is the chunk number, |
| 500 | * ptr_parm 0 is the dcache entry to wakeup, |
| 501 | * size_parm 1 is true iff we should release the dcache entry here. |
| 502 | */ |
| 503 | static void |
| 504 | BPrefetch(struct brequest *ab) |
| 505 | { |
| 506 | struct dcache *tdc; |
| 507 | struct vcache *tvc; |
| 508 | afs_size_t offset, len, abyte, totallen = 0; |
| 509 | struct vrequest treq; |
| 510 | |
| 511 | AFS_STATCNT(BPrefetch)((afs_cmstats.callInfo.C_BPrefetch)++); |
| 512 | if ((len = afs_InitReq(&treq, ab->cred))) |
| 513 | return; |
| 514 | abyte = ab->size_parm[0]; |
| 515 | tvc = ab->vc; |
| 516 | do { |
| 517 | tdc = afs_GetDCache(tvc, abyte, &treq, &offset, &len, 1); |
| 518 | if (tdc) { |
| 519 | afs_PutDCache(tdc); |
| 520 | } |
| 521 | abyte+=len; |
| 522 | totallen += len; |
| 523 | } while ((totallen < afs_preCache) && tdc && (len > 0)); |
| 524 | /* now, dude may be waiting for us to clear DFFetchReq bit; do so. Can't |
| 525 | * use tdc from GetDCache since afs_GetDCache may fail, but someone may |
| 526 | * be waiting for our wakeup anyway. |
| 527 | */ |
| 528 | tdc = (struct dcache *)(ab->ptr_parm[0]); |
| 529 | ObtainSharedLock(&tdc->lock, 640)do { ; if (!(&tdc->lock)->excl_locked) (&tdc-> lock) -> excl_locked = 4; else Afs_Lock_Obtain(&tdc-> lock, 4); (&tdc->lock)->pid_writer = (((__curthread ())->td_proc)->p_pid ); (&tdc->lock)->src_indicator = 640; } while (0); |
| 530 | if (tdc->mflags & DFFetchReq0x10) { |
| 531 | UpgradeSToWLock(&tdc->lock, 641)do { ; if (!(&tdc->lock)->readers_reading) (&tdc ->lock)->excl_locked = 2; else Afs_Lock_Obtain(&tdc ->lock, 6); (&tdc->lock)->pid_writer = (((__curthread ())->td_proc)->p_pid ); (&tdc->lock)->src_indicator = 641; } while (0); |
| 532 | tdc->mflags &= ~DFFetchReq0x10; |
| 533 | ReleaseWriteLock(&tdc->lock)do { ; (&tdc->lock)->excl_locked &= ~2; if ((& tdc->lock)->wait_states) Afs_Lock_ReleaseR(&tdc-> lock); (&tdc->lock)->pid_writer=0; } while (0); |
| 534 | } else { |
| 535 | ReleaseSharedLock(&tdc->lock)do { ; (&tdc->lock)->excl_locked &= ~(4 | 2); if ((&tdc->lock)->wait_states) Afs_Lock_ReleaseR(& tdc->lock); (&tdc->lock)->pid_writer=0; } while ( 0); |
| 536 | } |
| 537 | afs_osi_Wakeup(&tdc->validPos); |
| 538 | if (ab->size_parm[1]) { |
| 539 | afs_PutDCache(tdc); /* put this one back, too */ |
| 540 | } |
| 541 | } |
| 542 | |
#if defined(AFS_CACHE_BYPASS)
/* Background handler: cache-bypass prefetch.  ptr_parm 0 carries the
 * nocache read request; the actual transfer is done by the OS-specific
 * afs_PrefetchNoCache routine (kernel builds only). */
static void
BPrefetchNoCache(struct brequest *ab)
{
    struct vrequest treq;
    afs_size_t len;

    if ((len = afs_InitReq(&treq, ab->cred)))
	return;

#ifndef UKERNEL
    /* OS-specific prefetch routine */
    afs_PrefetchNoCache(ab->vc, ab->cred,
			(struct nocache_read_request *)ab->ptr_parm[0]);
#endif
}
#endif
| 559 | |
| 560 | static void |
| 561 | BStore(struct brequest *ab) |
| 562 | { |
| 563 | struct vcache *tvc; |
| 564 | afs_int32 code; |
| 565 | struct vrequest treq; |
| 566 | #if defined(AFS_SGI_ENV) |
| 567 | struct cred *tmpcred; |
| 568 | #endif |
| 569 | |
| 570 | AFS_STATCNT(BStore)((afs_cmstats.callInfo.C_BStore)++); |
| 571 | if ((code = afs_InitReq(&treq, ab->cred))) |
| 572 | return; |
| 573 | code = 0; |
| 574 | tvc = ab->vc; |
| 575 | #if defined(AFS_SGI_ENV) |
| 576 | /* |
| 577 | * Since StoreOnLastReference can end up calling osi_SyncVM which |
| 578 | * calls into VM code that assumes that u.u_cred has the |
| 579 | * correct credentials, we set our to theirs for this xaction |
| 580 | */ |
| 581 | tmpcred = OSI_GET_CURRENT_CRED(); |
| 582 | OSI_SET_CURRENT_CRED(ab->cred); |
| 583 | |
| 584 | /* |
| 585 | * To avoid recursion since the WriteLock may be released during VM |
| 586 | * operations, we hold the VOP_RWLOCK across this transaction as |
| 587 | * do the other callers of StoreOnLastReference |
| 588 | */ |
| 589 | AFS_RWLOCK((vnode_tstruct vnode *) tvc, 1); |
| 590 | #endif |
| 591 | ObtainWriteLock(&tvc->lock, 209)do { ; if (!(&tvc->lock)->excl_locked && !( &tvc->lock)->readers_reading) (&tvc->lock) -> excl_locked = 2; else Afs_Lock_Obtain(&tvc->lock, 2); (&tvc->lock)->pid_writer = (((__curthread())->td_proc )->p_pid ); (&tvc->lock)->src_indicator = 209; } while (0); |
| 592 | code = afs_StoreOnLastReference(tvc, &treq); |
| 593 | ReleaseWriteLock(&tvc->lock)do { ; (&tvc->lock)->excl_locked &= ~2; if ((& tvc->lock)->wait_states) Afs_Lock_ReleaseR(&tvc-> lock); (&tvc->lock)->pid_writer=0; } while (0); |
| 594 | #if defined(AFS_SGI_ENV) |
| 595 | OSI_SET_CURRENT_CRED(tmpcred); |
| 596 | AFS_RWUNLOCK((vnode_tstruct vnode *) tvc, 1); |
| 597 | #endif |
| 598 | /* now set final return code, and wakeup anyone waiting */ |
| 599 | if ((ab->flags & BUVALID2) == 0) { |
| 600 | ab->code = afs_CheckCode(code, &treq, 43); /* set final code, since treq doesn't go across processes */ |
| 601 | ab->flags |= BUVALID2; |
| 602 | if (ab->flags & BUWAIT4) { |
| 603 | ab->flags &= ~BUWAIT4; |
| 604 | afs_osi_Wakeup(ab); |
| 605 | } |
| 606 | } |
| 607 | } |
| 608 | |
| 609 | /* release a held request buffer */ |
| 610 | void |
| 611 | afs_BRelease(struct brequest *ab) |
| 612 | { |
| 613 | |
| 614 | AFS_STATCNT(afs_BRelease)((afs_cmstats.callInfo.C_afs_BRelease)++); |
| 615 | ObtainWriteLock(&afs_xbrs, 294)do { ; if (!(&afs_xbrs)->excl_locked && !(& afs_xbrs)->readers_reading) (&afs_xbrs) -> excl_locked = 2; else Afs_Lock_Obtain(&afs_xbrs, 2); (&afs_xbrs) ->pid_writer = (((__curthread())->td_proc)->p_pid ); (&afs_xbrs)->src_indicator = 294; } while (0); |
| 616 | if (--ab->refCount <= 0) { |
| 617 | ab->flags = 0; |
| 618 | } |
| 619 | if (afs_brsWaiters) |
| 620 | afs_osi_Wakeup(&afs_brsWaiters); |
| 621 | ReleaseWriteLock(&afs_xbrs)do { ; (&afs_xbrs)->excl_locked &= ~2; if ((&afs_xbrs )->wait_states) Afs_Lock_ReleaseR(&afs_xbrs); (&afs_xbrs )->pid_writer=0; } while (0); |
| 622 | } |
| 623 | |
| 624 | /* return true if bkg fetch daemons are all busy */ |
| 625 | int |
| 626 | afs_BBusy(void) |
| 627 | { |
| 628 | AFS_STATCNT(afs_BBusy)((afs_cmstats.callInfo.C_afs_BBusy)++); |
| 629 | if (afs_brsDaemons > 0) |
| 630 | return 0; |
| 631 | return 1; |
| 632 | } |
| 633 | |
| 634 | struct brequest * |
| 635 | afs_BQueue(short aopcode, struct vcache *avc, |
| 636 | afs_int32 dontwait, afs_int32 ause, afs_ucred_t *acred, |
| 637 | afs_size_t asparm0, afs_size_t asparm1, void *apparm0, |
| 638 | void *apparm1, void *apparm2) |
| 639 | { |
| 640 | int i; |
| 641 | struct brequest *tb; |
| 642 | |
| 643 | AFS_STATCNT(afs_BQueue)((afs_cmstats.callInfo.C_afs_BQueue)++); |
| 644 | ObtainWriteLock(&afs_xbrs, 296)do { ; if (!(&afs_xbrs)->excl_locked && !(& afs_xbrs)->readers_reading) (&afs_xbrs) -> excl_locked = 2; else Afs_Lock_Obtain(&afs_xbrs, 2); (&afs_xbrs) ->pid_writer = (((__curthread())->td_proc)->p_pid ); (&afs_xbrs)->src_indicator = 296; } while (0); |
| 645 | while (1) { |
| 646 | tb = afs_brs; |
| 647 | for (i = 0; i < NBRS15; i++, tb++) { |
| 648 | if (tb->refCount == 0) |
| 649 | break; |
| 650 | } |
| 651 | if (i < NBRS15) { |
| 652 | /* found a buffer */ |
| 653 | tb->opcode = aopcode; |
| 654 | tb->vc = avc; |
| 655 | tb->cred = acred; |
| 656 | crhold(tb->cred); |
| 657 | if (avc) { |
| 658 | AFS_FAST_HOLD(avc)vref((((avc))->v)); |
| 659 | } |
| 660 | tb->refCount = ause + 1; |
| 661 | tb->size_parm[0] = asparm0; |
| 662 | tb->size_parm[1] = asparm1; |
| 663 | tb->ptr_parm[0] = apparm0; |
| 664 | tb->ptr_parm[1] = apparm1; |
| 665 | tb->ptr_parm[2] = apparm2; |
| 666 | tb->flags = 0; |
| 667 | tb->code = 0; |
| 668 | tb->ts = afs_brs_count++; |
| 669 | /* if daemons are waiting for work, wake them up */ |
| 670 | if (afs_brsDaemons > 0) { |
| 671 | afs_osi_Wakeup(&afs_brsDaemons); |
| 672 | } |
| 673 | ReleaseWriteLock(&afs_xbrs)do { ; (&afs_xbrs)->excl_locked &= ~2; if ((&afs_xbrs )->wait_states) Afs_Lock_ReleaseR(&afs_xbrs); (&afs_xbrs )->pid_writer=0; } while (0); |
| 674 | return tb; |
| 675 | } |
| 676 | if (dontwait) { |
| 677 | ReleaseWriteLock(&afs_xbrs)do { ; (&afs_xbrs)->excl_locked &= ~2; if ((&afs_xbrs )->wait_states) Afs_Lock_ReleaseR(&afs_xbrs); (&afs_xbrs )->pid_writer=0; } while (0); |
| 678 | return NULL((void *)0); |
| 679 | } |
| 680 | /* no free buffers, sleep a while */ |
| 681 | afs_brsWaiters++; |
| 682 | ReleaseWriteLock(&afs_xbrs)do { ; (&afs_xbrs)->excl_locked &= ~2; if ((&afs_xbrs )->wait_states) Afs_Lock_ReleaseR(&afs_xbrs); (&afs_xbrs )->pid_writer=0; } while (0); |
| 683 | afs_osi_Sleep(&afs_brsWaiters); |
| 684 | ObtainWriteLock(&afs_xbrs, 301)do { ; if (!(&afs_xbrs)->excl_locked && !(& afs_xbrs)->readers_reading) (&afs_xbrs) -> excl_locked = 2; else Afs_Lock_Obtain(&afs_xbrs, 2); (&afs_xbrs) ->pid_writer = (((__curthread())->td_proc)->p_pid ); (&afs_xbrs)->src_indicator = 301; } while (0); |
| 685 | afs_brsWaiters--; |
| 686 | } |
| 687 | } |
| 688 | |
| 689 | #ifdef AFS_AIX41_ENV |
| 690 | /* AIX 4.1 has a much different sleep/wakeup mechanism available for use. |
| 691 | * The modifications here will work for either a UP or MP machine. |
| 692 | */ |
| 693 | struct buf *afs_asyncbuf = (struct buf *)0; |
| 694 | tid_t afs_asyncbuf_cv = EVENT_NULL; |
| 695 | afs_int32 afs_biodcnt = 0; |
| 696 | |
| 697 | /* in implementing this, I assumed that all external linked lists were |
| 698 | * null-terminated. |
| 699 | * |
| 700 | * Several places in this code traverse a linked list. The algorithm |
| 701 | * used here is probably unfamiliar to most people. Careful examination |
| 702 | * will show that it eliminates an assignment inside the loop, as compared |
| 703 | * to the standard algorithm, at the cost of occasionally using an extra |
| 704 | * variable. |
| 705 | */ |
| 706 | |
| 707 | /* get_bioreq() |
| 708 | * |
| 709 | * This function obtains, and returns, a pointer to a buffer for |
| 710 | * processing by a daemon. It sleeps until such a buffer is available. |
| 711 | * The source of buffers for it is the list afs_asyncbuf (see also |
| 712 | * afs_gn_strategy). This function may be invoked concurrently by |
| 713 | * several processes, that is, several instances of the same daemon. |
| 714 | * afs_gn_strategy, which adds buffers to the list, runs at interrupt |
| 715 | * level, while get_bioreq runs at process level. |
| 716 | * |
| 717 | * Since AIX 4.1 can wake just one process at a time, the separate sleep |
| 718 | * addresses have been removed. |
| 719 | * Note that the kernel_lock is held until the e_sleep_thread() occurs. |
| 720 | * The afs_asyncbuf_lock is primarily used to serialize access between |
| 721 | * process and interrupts. |
| 722 | */ |
| 723 | Simple_lock afs_asyncbuf_lock; |
/*
 * afs_get_bioreq -- (AIX 4.1 path) hand one queued buffer to a biod daemon.
 * With interrupts from the strategy routine disabled and afs_asyncbuf_lock
 * held, scan the afs_asyncbuf list for the entry whose av_back stamp is
 * smallest, unlink it, and return it with the AFS global lock re-held.
 * Sleeps interruptibly on afs_asyncbuf_cv while the list is empty; returns
 * NULL only when that sleep was interrupted.
 * NOTE(review): this text is a preprocessed listing -- macro calls appear
 * fused with their expansions; the code bytes are left untouched below.
 */
| 724 | struct buf *
| 725 | afs_get_bioreq()
| 726 | {
| 727 | struct buf *bp = NULL((void *)0);
| 728 | struct buf *bestbp;
| 729 | struct buf **bestlbpP, **lbpP;
| 730 | long bestage, stop;
| 731 | struct buf *t1P, *t2P; /* temp pointers for list manipulation */
| 732 | int oldPriority;
| 733 | afs_uint32 wait_ret;
| 734 | struct afs_bioqueue *s;
/* NOTE(review): stop, wait_ret and s are declared but never used in this
 * function body. */
| 735 |
| 736 | /* ??? Does the forward pointer of the returned buffer need to be NULL?
| 737 | */
| 738 |
| 739 | /* Disable interrupts from the strategy function, and save the
| 740 | * prior priority level and lock access to the afs_asyncbuf.
| 741 | */
| 742 | AFS_GUNLOCK()do { (void)0; _mtx_unlock_flags(((&afs_global_mtx)), (0), "/home/wollman/openafs/src/afs/afs_daemons.c", 742); } while (0);
| 743 | oldPriority = disable_lock(INTMAX, &afs_asyncbuf_lock);
| 744 |
| 745 | while (1) {
| 746 | if (afs_asyncbuf) {
| 747 | /* look for oldest buffer */
/* av_back is read as a (long) ordering stamp here; the subtraction compare
 * below picks the smallest one.  Presumably it is written by
 * afs_gn_strategy -- TODO confirm. */
| 748 | bp = bestbp = afs_asyncbuf;
| 749 | bestage = (long)bestbp->av_back;
| 750 | bestlbpP = &afs_asyncbuf;
| 751 | while (1) {
| 752 | lbpP = &bp->av_forw;
| 753 | bp = *lbpP;
| 754 | if (!bp)
| 755 | break;
| 756 | if ((long)bp->av_back - bestage < 0) {
| 757 | bestbp = bp;
| 758 | bestlbpP = lbpP;
| 759 | bestage = (long)bp->av_back;
| 760 | }
| 761 | }
| 762 | bp = bestbp;
| 763 | *bestlbpP = bp->av_forw;
| 764 | break;
| 765 | } else {
| 766 | /* If afs_asyncbuf is null, it is necessary to go to sleep.
| 767 | * e_wakeup_one() ensures that only one thread wakes.
| 768 | */
| 769 | int interrupted;
| 770 | /* The LOCK_HANDLER indicates to e_sleep_thread to only drop the
| 771 | * lock on an MP machine.
| 772 | */
| 773 | interrupted =
| 774 | e_sleep_thread(&afs_asyncbuf_cv, &afs_asyncbuf_lock,
| 775 | LOCK_HANDLER | INTERRUPTIBLE);
| 776 | if (interrupted == THREAD_INTERRUPTED) {
| 777 | /* re-enable interrupts from strategy */
| 778 | unlock_enable(oldPriority, &afs_asyncbuf_lock);
| 779 | AFS_GLOCK()do { (void)0; _mtx_lock_flags(((&afs_global_mtx)), (0), "/home/wollman/openafs/src/afs/afs_daemons.c" , 779); (void)0; } while (0);
| 780 | return (NULL((void *)0));
| 781 | }
| 782 | } /* end of "else asyncbuf is empty" */
| 783 | } /* end of "inner loop" */
| 784 |
| 785 | /*assert (bp); */
| 786 |
| 787 | unlock_enable(oldPriority, &afs_asyncbuf_lock);
| 788 | AFS_GLOCK()do { (void)0; _mtx_lock_flags(((&afs_global_mtx)), (0), "/home/wollman/openafs/src/afs/afs_daemons.c" , 788); (void)0; } while (0);
| 789 |
| 790 | /* For the convenience of other code, replace the gnodes in
| 791 | * the b_vp field of bp and the other buffers on the b_work
| 792 | * chain with the corresponding vnodes.
| 793 | *
| 794 | * ??? what happens to the gnodes? They're not just cut loose,
| 795 | * are they?
| 796 | */
/* Walk the b_work chain two entries per iteration, rewriting each b_vp
 * from gnode to vnode. */
| 797 | for (t1P = bp;;) {
| 798 | t2P = (struct buf *)t1P->b_work;
| 799 | t1P->b_vp = ((struct gnode *)t1P->b_vp)->gn_vnode;
| 800 | if (!t2P)
| 801 | break;
| 802 |
| 803 | t1P = (struct buf *)t2P->b_work;
| 804 | t2P->b_vp = ((struct gnode *)t2P->b_vp)->gn_vnode;
| 805 | if (!t1P)
| 806 | break;
| 807 | }
| 808 |
| 809 | /* If the buffer does not specify I/O, it may immediately
| 810 | * be returned to the caller. This condition is detected
| 811 | * by examining the buffer's flags (the b_flags field). If
| 812 | * the B_PFPROT bit is set, the buffer represents a protection
| 813 | * violation, rather than a request for I/O. The remainder
| 814 | * of the outer loop handles the case where the B_PFPROT bit is clear.
| 815 | */
/* NOTE(review): both arms below return bp, so the B_PFPROT test is
 * redundant as written -- the distinction is handled by the caller
 * (afs_BioDaemon), not here. */
| 816 | if (bp->b_flags & B_PFPROT) {
| 817 | return (bp);
| 818 | }
| 819 | return (bp);
| 820 |
| 821 | } /* end of function get_bioreq() */
| 822 | |
| 823 | |
| 824 | /* afs_BioDaemon |
| 825 | * |
| 826 | * This function is the daemon. It is called from the syscall |
| 827 | * interface. Ordinarily, a script or an administrator will run a |
| 828 | * daemon startup utility, specifying the number of I/O daemons to |
| 829 | * run. The utility will fork off that number of processes, |
| 830 | * each making the appropriate syscall, which will cause this |
| 831 | * function to be invoked. |
| 832 | */ |
| 833 | static int afs_initbiod = 0; /* this is self-initializing code */ |
| 834 | int DOvmlock = 0; |
/*
 * afs_BioDaemon -- main loop for an AIX biod daemon thread (invoked via the
 * syscall interface, one call per daemon process).  One-time setup pins the
 * async-buffer lock/state; SIGHUP is then masked, and the loop pulls buffers
 * from afs_get_bioreq() forever, exiting only when a NULL return signals
 * interruption.  setjmpx() establishes a recovery point: on a longjmp the
 * whole b_work chain is failed with B_ERROR and iodone'd.
 * NOTE(review): the nbiods parameter is never referenced in this body, and
 * pflg / vp are set or declared but otherwise unused.
 */
| 835 | int
| 836 | afs_BioDaemon(afs_int32 nbiods)
| 837 | {
| 838 | afs_int32 code, s, pflg = 0;
| 839 | label_t jmpbuf;
| 840 | struct buf *bp, *bp1, *tbp1, *tbp2; /* temp pointers only */
| 841 | caddr_t tmpaddr;
| 842 | struct vnode *vp;
| 843 | struct vcache *vcp;
| 844 | char tmperr;
| 845 | if (!afs_initbiod) {
| 846 | /* XXX ###1 XXX */
| 847 | afs_initbiod = 1;
| 848 | /* pin lock, since we'll be using it in an interrupt. */
| 849 | lock_alloc(&afs_asyncbuf_lock, LOCK_ALLOC_PIN, 2, 1);
| 850 | simple_lock_init(&afs_asyncbuf_lock);
| 851 | pin(&afs_asyncbuf, sizeof(struct buf *));
| 852 | pin(&afs_asyncbuf_cv, sizeof(afs_int32));
| 853 | }
| 854 |
| 855 | /* Ignore HUP signals... */
| 856 | {
| 857 | sigset_t sigbits, osigbits;
| 858 | /*
| 859 | * add SIGHUP to the set of already masked signals
| 860 | */
| 861 | SIGFILLSET(sigbits)do { int __i; for (__i = 0; __i < 4; __i++) (sigbits).__bits [__i] = ~0U; } while (0); /* allow all signals */
| 862 | SIGDELSET(sigbits, SIGHUP)((sigbits).__bits[(((1) - 1) >> 5)] &= ~(1 << (((1) - 1) & 31))); /* except SIGHUP */
| 863 | limit_sigs(&sigbits, &osigbits); /* and already masked */
| 864 | }
| 865 | /* Main body starts here -- this is an intentional infinite loop, and
| 866 | * should NEVER exit
| 867 | *
| 868 | * Now, the loop will exit if get_bioreq() returns NULL, indicating
| 869 | * that we've been interrupted.
| 870 | */
| 871 | while (1) {
| 872 | bp = afs_get_bioreq();
| 873 | if (!bp)
| 874 | break; /* we were interrupted */
| 875 | if (code = setjmpx(&jmpbuf)) {
| 876 | /* This should not have happened, maybe a lack of resources */
| 877 | AFS_GUNLOCK()do { (void)0; _mtx_unlock_flags(((&afs_global_mtx)), (0), "/home/wollman/openafs/src/afs/afs_daemons.c", 877); } while (0);
| 878 | s = disable_lock(INTMAX, &afs_asyncbuf_lock);
/* Fail every buffer on the b_work chain: bp1 is advanced to the next
 * chain entry before bp is marked and iodone'd. */
| 879 | for (bp1 = bp; bp; bp = bp1) {
| 880 | if (bp1)
| 881 | bp1 = (struct buf *)bp1->b_work;
| 882 | bp->b_actf = 0;
| 883 | bp->b_error = code;
| 884 | bp->b_flags |= B_ERROR;
| 885 | iodonebiodone(bp);
| 886 | }
| 887 | unlock_enable(s, &afs_asyncbuf_lock);
| 888 | AFS_GLOCK()do { (void)0; _mtx_lock_flags(((&afs_global_mtx)), (0), "/home/wollman/openafs/src/afs/afs_daemons.c" , 888); (void)0; } while (0);
| 889 | continue;
| 890 | }
| 891 | vcp = VTOAFS(bp->b_vp)((struct vcache *)(bp->b_vp)->v_data);
/* On a pagefault store, grow the cached file length if the store would
 * extend past it (only while the file is mapped for writing). */
| 892 | if (bp->b_flags & B_PFSTORE) { /* XXXX */
| 893 | ObtainWriteLock(&vcp->lock, 404)do { ; if (!(&vcp->lock)->excl_locked && !( &vcp->lock)->readers_reading) (&vcp->lock) -> excl_locked = 2; else Afs_Lock_Obtain(&vcp->lock, 2); (&vcp->lock)->pid_writer = (((__curthread())->td_proc )->p_pid ); (&vcp->lock)->src_indicator = 404; } while (0);
| 894 | if (vcp->v.v_gnode->gn_mwrcnt) {
| 895 | afs_offs_t newlength =
| 896 | (afs_offs_t) dbtob(bp->b_blkno)((off_t)(bp->b_blkno) << 9) + bp->b_bcount;
| 897 | if (vcp->f.m.Length < newlength) {
| 898 | afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ? afs_icl_Event4(afs_iclSetp, (701087886L), (1<<24)+((4) <<18)+((1)<<12)+((8)<<6)+(8), (long)("/home/wollman/openafs/src/afs/afs_daemons.c" ), (long)(900), (long)((&vcp->f.m.Length)), (long)((& newlength))) : 0)
| 899 | ICL_TYPE_STRING, __FILE__, ICL_TYPE_LONG,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ? afs_icl_Event4(afs_iclSetp, (701087886L), (1<<24)+((4) <<18)+((1)<<12)+((8)<<6)+(8), (long)("/home/wollman/openafs/src/afs/afs_daemons.c" ), (long)(900), (long)((&vcp->f.m.Length)), (long)((& newlength))) : 0)
| 900 | __LINE__, ICL_TYPE_OFFSET,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ? afs_icl_Event4(afs_iclSetp, (701087886L), (1<<24)+((4) <<18)+((1)<<12)+((8)<<6)+(8), (long)("/home/wollman/openafs/src/afs/afs_daemons.c" ), (long)(900), (long)((&vcp->f.m.Length)), (long)((& newlength))) : 0)
| 901 | ICL_HANDLE_OFFSET(vcp->f.m.Length),(((afs_iclSetp) && (afs_iclSetp->states & 2)) ? afs_icl_Event4(afs_iclSetp, (701087886L), (1<<24)+((4) <<18)+((1)<<12)+((8)<<6)+(8), (long)("/home/wollman/openafs/src/afs/afs_daemons.c" ), (long)(900), (long)((&vcp->f.m.Length)), (long)((& newlength))) : 0)
| 902 | ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(newlength))(((afs_iclSetp) && (afs_iclSetp->states & 2)) ? afs_icl_Event4(afs_iclSetp, (701087886L), (1<<24)+((4) <<18)+((1)<<12)+((8)<<6)+(8), (long)("/home/wollman/openafs/src/afs/afs_daemons.c" ), (long)(900), (long)((&vcp->f.m.Length)), (long)((& newlength))) : 0);
| 903 | vcp->f.m.Length = newlength;
| 904 | }
| 905 | }
| 906 | ReleaseWriteLock(&vcp->lock)do { ; (&vcp->lock)->excl_locked &= ~2; if ((& vcp->lock)->wait_states) Afs_Lock_ReleaseR(&vcp-> lock); (&vcp->lock)->pid_writer=0; } while (0);
| 907 | }
| 908 | /* If the buffer represents a protection violation, rather than
| 909 | * an actual request for I/O, no special action need be taken.
| 910 | */
| 911 | if (bp->b_flags & B_PFPROT) {
| 912 | iodonebiodone(bp); /* Notify all users of the buffer that we're done */
| 913 | clrjmpx(&jmpbuf);
| 914 | continue;
| 915 | }
| 916 | if (DOvmlock)
| 917 | ObtainWriteLock(&vcp->pvmlock, 211)do { ; if (!(&vcp->pvmlock)->excl_locked && !(&vcp->pvmlock)->readers_reading) (&vcp->pvmlock ) -> excl_locked = 2; else Afs_Lock_Obtain(&vcp->pvmlock , 2); (&vcp->pvmlock)->pid_writer = (((__curthread( ))->td_proc)->p_pid ); (&vcp->pvmlock)->src_indicator = 211; } while (0);
| 918 | /*
| 919 | * First map its data area to a region in the current address space
| 920 | * by calling vm_att with the subspace identifier, and a pointer to
| 921 | * the data area. vm_att returns a new data area pointer, but we
| 922 | * also want to hang onto the old one.
| 923 | */
| 924 | tmpaddr = bp->b_baddr;
| 925 | bp->b_baddr = (caddr_t) vm_att(bp->b_xmemd.subspace_id, tmpaddr);
| 926 | tmperr = afs_ustrategy(bp); /* temp variable saves offset calculation */
| 927 | if (tmperr) { /* error case: propagate the failure to the buffer */
| 928 | bp->b_flags |= B_ERROR; /* should other flags remain set ??? */
| 929 | bp->b_error = tmperr;
| 930 | }
| 931 |
| 932 | /* Unmap the buffer's data area by calling vm_det. Reset data area
| 933 | * to the value that we saved above.
| 934 | */
| 935 | vm_det(bp->b_baddr);
| 936 | bp->b_baddr = tmpaddr;
| 937 |
| 938 | /*
| 939 | * buffer may be linked with other buffers via the b_work field.
| 940 | * See also afs_gn_strategy. For each buffer in the chain (including
| 941 | * bp) notify all users of the buffer that the daemon is finished
| 942 | * using it by calling iodone.
| 943 | * assumes iodone can modify the b_work field.
| 944 | */
| 945 | for (tbp1 = bp;;) {
| 946 | tbp2 = (struct buf *)tbp1->b_work;
| 947 | iodonebiodone(tbp1);
| 948 | if (!tbp2)
| 949 | break;
| 950 |
| 951 | tbp1 = (struct buf *)tbp2->b_work;
| 952 | iodonebiodone(tbp2);
| 953 | if (!tbp1)
| 954 | break;
| 955 | }
| 956 | if (DOvmlock)
| 957 | ReleaseWriteLock(&vcp->pvmlock)do { ; (&vcp->pvmlock)->excl_locked &= ~2; if ( (&vcp->pvmlock)->wait_states) Afs_Lock_ReleaseR(& vcp->pvmlock); (&vcp->pvmlock)->pid_writer=0; } while (0); /* Unlock the vnode. */
| 958 | clrjmpx(&jmpbuf);
| 959 | } /* infinite loop (unless we're interrupted) */
| 960 | } /* end of afs_BioDaemon() */
| 961 | |
| 962 | #endif /* AFS_AIX41_ENV */ |
| 963 | |
| 964 | |
| 965 | int afs_nbrs = 0; |
/* One-time initialization of the background-request subsystem: set up the
 * afs_xbrs lock, zero the request table, and mark brsInit so later callers
 * skip this.  On SGI "short stack" builds the calling daemon is stolen to
 * run afs_sgidaemon() and this function never returns. */
| 966 | static_inlinestatic inline void
| 967 | afs_BackgroundDaemon_once(void)
| 968 | {
| 969 | LOCK_INIT(&afs_xbrs, "afs_xbrs")Lock_Init(&afs_xbrs);
| 970 | memset(afs_brs, 0, sizeof(afs_brs));
| 971 | brsInit = 1;
| 972 | #if defined (AFS_SGI_ENV) && defined(AFS_SGI_SHORTSTACK)
| 973 | /*
| 974 | * steal the first daemon for doing delayed DSlot flushing
| 975 | * (see afs_GetDownDSlot)
| 976 | */
| 977 | AFS_GUNLOCK()do { (void)0; _mtx_unlock_flags(((&afs_global_mtx)), (0), "/home/wollman/openafs/src/afs/afs_daemons.c", 977); } while (0);
| 978 | afs_sgidaemon();
| 979 | exit(CLD_EXITED1, 0);
| 980 | #endif
| 981 | }
| 982 | |
/* Release the resources attached to a completed background request: drop
 * the vcache's vnode reference through the vnode layer (with the AFS global
 * lock dropped around the vrele), free the saved credentials, then return
 * the request slot via afs_BRelease(). */
| 983 | static_inlinestatic inline void
| 984 | brequest_release(struct brequest *tb)
| 985 | {
| 986 | if (tb->vc) {
| 987 | AFS_RELE(AFSTOV(tb->vc))do { do { (void)0; _mtx_unlock_flags(((&afs_global_mtx)), (0), "/home/wollman/openafs/src/afs/afs_daemons.c", 987); } while (0); do { vrele(((tb->vc)->v)); } while(0);; do { (void )0; _mtx_lock_flags(((&afs_global_mtx)), (0), "/home/wollman/openafs/src/afs/afs_daemons.c" , 987); (void)0; } while (0); } while (0); /* MUST call vnode layer or could lose vnodes */
| 988 | tb->vc = NULL((void *)0);
| 989 | }
| 990 | if (tb->cred) {
| 991 | crfree(tb->cred);
| 992 | tb->cred = (afs_ucred_t *)0;
| 993 | }
| 994 | afs_BRelease(tb); /* this grabs and releases afs_xbrs lock */
| 995 | }
| 996 | |
/*
 * afs_BackgroundDaemon -- service loop for background requests (prefetch,
 * store, path ops).  Repeatedly claims the queued request with the smallest
 * timestamp from afs_brs, runs its handler with afs_xbrs dropped, and sleeps
 * on afs_brsDaemons when the table holds no unstarted work.  Exits when
 * afs_termState reaches AFSOP_STOP_BKG, waking the shutdown waiter.
 * On Darwin 8.0+ the function doubles as a syscall bridge: a re-entering
 * call (uspc->ts > -1) completes a prior request with the userspace result,
 * and a BOP_MOVE request is copied out and returns 0 so userspace can
 * perform the move.
 */
| 997 | #ifdef AFS_DARWIN80_ENV
| 998 | int
| 999 | afs_BackgroundDaemon(struct afs_uspc_param *uspc, void *param1, void *param2)
| 1000 | #else
| 1001 | void
| 1002 | afs_BackgroundDaemon(void)
| 1003 | #endif
| 1004 | {
| 1005 | struct brequest *tb;
| 1006 | int i, foundAny;
| 1007 |
| 1008 | AFS_STATCNT(afs_BackgroundDaemon)((afs_cmstats.callInfo.C_afs_BackgroundDaemon)++);
| 1009 | /* initialize subsystem */
| 1010 | if (brsInit == 0)
| 1011 | /* Irix with "short stack" exits */
| 1012 | afs_BackgroundDaemon_once();
| 1013 |
| 1014 | #ifdef AFS_DARWIN80_ENV
| 1015 | /* If it's a re-entering syscall, complete the request and release */
| 1016 | if (uspc->ts > -1) {
| 1017 | tb = afs_brs;
| 1018 | for (i = 0; i < NBRS15; i++, tb++) {
| 1019 | if (tb->ts == uspc->ts) {
| 1020 | /* copy the userspace status back in */
| 1021 | ((struct afs_uspc_param *) tb->ptr_parm[0])->retval =
| 1022 | uspc->retval;
| 1023 | /* mark it valid and notify our caller */
| 1024 | tb->flags |= BUVALID2;
| 1025 | if (tb->flags & BUWAIT4) {
| 1026 | tb->flags &= ~BUWAIT4;
| 1027 | afs_osi_Wakeup(tb);
| 1028 | }
| 1029 | brequest_release(tb);
| 1030 | break;
| 1031 | }
| 1032 | }
| 1033 | } else {
| 1034 | afs_osi_MaskUserLoop();
| 1035 | #endif
| 1036 | /* Otherwise it's a new one */
| 1037 | afs_nbrs++;
| 1038 | #ifdef AFS_DARWIN80_ENV
| 1039 | }
| 1040 | #endif
| 1041 |
| 1042 | ObtainWriteLock(&afs_xbrs, 302)do { ; if (!(&afs_xbrs)->excl_locked && !(& afs_xbrs)->readers_reading) (&afs_xbrs) -> excl_locked = 2; else Afs_Lock_Obtain(&afs_xbrs, 2); (&afs_xbrs) ->pid_writer = (((__curthread())->td_proc)->p_pid ); (&afs_xbrs)->src_indicator = 302; } while (0);
| 1043 | while (1) {
| 1044 | int min_ts = 0;
| 1045 | struct brequest *min_tb = NULL((void *)0);
| 1046 |
| 1047 | if (afs_termState == AFSOP_STOP_BKG212) {
| 1048 | if (--afs_nbrs <= 0)
| 1049 | afs_termState = AFSOP_STOP_TRUNCDAEMON213;
| 1050 | ReleaseWriteLock(&afs_xbrs)do { ; (&afs_xbrs)->excl_locked &= ~2; if ((&afs_xbrs )->wait_states) Afs_Lock_ReleaseR(&afs_xbrs); (&afs_xbrs )->pid_writer=0; } while (0);
| 1051 | afs_osi_Wakeup(&afs_termState);
| 1052 | #ifdef AFS_DARWIN80_ENV
| 1053 | return -2;
| 1054 | #else
| 1055 | return;
| 1056 | #endif
| 1057 | }
| 1058 |
| 1059 | /* find a request */
| 1060 | tb = afs_brs;
| 1061 | foundAny = 0;
| 1062 | for (i = 0; i < NBRS15; i++, tb++) {
| 1063 | /* look for request with smallest ts */
| 1064 | if ((tb->refCount > 0) && !(tb->flags & BSTARTED1)) {
| 1065 | /* new request, not yet picked up */
/* NOTE(review): "(min_ts - tb->ts > 0)" is written as a subtraction so
 * the comparison tolerates ts wraparound -- confirm ts semantics. */
| 1066 | if ((min_tb && (min_ts - tb->ts > 0)) || !min_tb) {
| 1067 | min_tb = tb;
| 1068 | min_ts = tb->ts;
| 1069 | }
| 1070 | }
| 1071 | }
| 1072 | if ((tb = min_tb)) {
| 1073 | /* claim and process this request */
| 1074 | tb->flags |= BSTARTED1;
| 1075 | ReleaseWriteLock(&afs_xbrs)do { ; (&afs_xbrs)->excl_locked &= ~2; if ((&afs_xbrs )->wait_states) Afs_Lock_ReleaseR(&afs_xbrs); (&afs_xbrs )->pid_writer=0; } while (0);
| 1076 | foundAny = 1;
| 1077 | afs_Trace1(afs_iclSetp, CM_TRACE_BKG1, ICL_TYPE_INT32,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ? afs_icl_Event1(afs_iclSetp, (701087799L), (1<<24)+((7) <<18), (long)(tb->opcode)) : 0)
| 1078 | tb->opcode)(((afs_iclSetp) && (afs_iclSetp->states & 2)) ? afs_icl_Event1(afs_iclSetp, (701087799L), (1<<24)+((7) <<18), (long)(tb->opcode)) : 0);
| 1079 | if (tb->opcode == BOP_FETCH1)
| 1080 | BPrefetch(tb);
| 1081 | #if defined(AFS_CACHE_BYPASS)
| 1082 | else if (tb->opcode == BOP_FETCH_NOCACHE)
| 1083 | BPrefetchNoCache(tb);
| 1084 | #endif
| 1085 | else if (tb->opcode == BOP_STORE2)
| 1086 | BStore(tb);
| 1087 | else if (tb->opcode == BOP_PATH3)
| 1088 | BPath(tb);
| 1089 | #ifdef AFS_DARWIN80_ENV
| 1090 | else if (tb->opcode == BOP_MOVE) {
| 1091 | memcpy(uspc, (struct afs_uspc_param *) tb->ptr_parm[0],
| 1092 | sizeof(struct afs_uspc_param));
| 1093 | uspc->ts = tb->ts;
| 1094 | /* string lengths capped in move vop; copy NUL tho */
| 1095 | memcpy(param1, (char *)tb->ptr_parm[1],
| 1096 | strlen(tb->ptr_parm[1])+1);
| 1097 | memcpy(param2, (char *)tb->ptr_parm[2],
| 1098 | strlen(tb->ptr_parm[2])+1);
| 1099 | return 0;
| 1100 | }
| 1101 | #endif
| 1102 | else
| 1103 | panic("background bop");
| 1104 | brequest_release(tb);
| 1105 | ObtainWriteLock(&afs_xbrs, 305)do { ; if (!(&afs_xbrs)->excl_locked && !(& afs_xbrs)->readers_reading) (&afs_xbrs) -> excl_locked = 2; else Afs_Lock_Obtain(&afs_xbrs, 2); (&afs_xbrs) ->pid_writer = (((__curthread())->td_proc)->p_pid ); (&afs_xbrs)->src_indicator = 305; } while (0);
| 1106 | }
| 1107 | if (!foundAny) {
| 1108 | /* wait for new request */
| 1109 | afs_brsDaemons++;
| 1110 | ReleaseWriteLock(&afs_xbrs)do { ; (&afs_xbrs)->excl_locked &= ~2; if ((&afs_xbrs )->wait_states) Afs_Lock_ReleaseR(&afs_xbrs); (&afs_xbrs )->pid_writer=0; } while (0);
| 1111 | afs_osi_Sleep(&afs_brsDaemons);
| 1112 | ObtainWriteLock(&afs_xbrs, 307)do { ; if (!(&afs_xbrs)->excl_locked && !(& afs_xbrs)->readers_reading) (&afs_xbrs) -> excl_locked = 2; else Afs_Lock_Obtain(&afs_xbrs, 2); (&afs_xbrs) ->pid_writer = (((__curthread())->td_proc)->p_pid ); (&afs_xbrs)->src_indicator = 307; } while (0);
| 1113 | afs_brsDaemons--;
| 1114 | }
| 1115 | }
| 1116 | #ifdef AFS_DARWIN80_ENV
| 1117 | return -2;
| 1118 | #endif
| 1119 | }
| 1120 | |
| 1121 | |
/* Reset this module's globals on a cold shutdown (a warm shutdown is a
 * no-op here).  On AIX 4.1 also free/unpin the biod lock and state so the
 * next afs_BioDaemon() call re-runs its one-time setup. */
| 1122 | void
| 1123 | shutdown_daemons(void)
| 1124 | {
| 1125 | AFS_STATCNT(shutdown_daemons)((afs_cmstats.callInfo.C_shutdown_daemons)++);
| 1126 | if (afs_cold_shutdown) {
| 1127 | afs_brsDaemons = brsInit = 0;
| 1128 | rxepoch_checked = afs_nbrs = 0;
| 1129 | memset(afs_brs, 0, sizeof(afs_brs));
| 1130 | memset(&afs_xbrs, 0, sizeof(afs_lock_t));
| 1131 | afs_brsWaiters = 0;
| 1132 | #ifdef AFS_AIX41_ENV
| 1133 | lock_free(&afs_asyncbuf_lock);
| 1134 | unpin(&afs_asyncbuf, sizeof(struct buf *));
| 1135 | unpin(&afs_asyncbuf_cv, sizeof(afs_int32));
| 1136 | afs_initbiod = 0;
| 1137 | #endif
| 1138 | }
| 1139 | }
| 1140 | |
| 1141 | #if defined(AFS_SGI_ENV) && defined(AFS_SGI_SHORTSTACK) |
| 1142 | /* |
| 1143 | * sgi - daemon - handles certain operations that otherwise |
| 1144 | * would use up too much kernel stack space |
| 1145 | * |
| 1146 | * This all assumes that since the caller must have the xdcache lock |
| 1147 | * exclusively that the list will never be more than one long |
| 1148 | * and no one else can attempt to add anything until we're done.
| 1149 | */ |
| 1150 | SV_TYPE afs_sgibksync; |
| 1151 | SV_TYPE afs_sgibkwait; |
| 1152 | lock_t afs_sgibklock; |
| 1153 | struct dcache *afs_sgibklist; |
| 1154 | |
/*
 * afs_sgidaemon -- SGI-only helper thread that performs delayed dcache
 * writes on its own (full-size) stack.  Self-initializes its sync variables
 * and spinlock on first entry, then loops forever: sleep until work is
 * posted on afs_sgibklist, write each queued dcache slot under the AFS
 * global lock (spinlock dropped around the write), then wake all waiters.
 * Per the comment above this function, the caller holds xdcache exclusively
 * so the "list" is at most one entry.
 * NOTE(review): the loop never exits, so the declared int return value is
 * never produced.
 */
| 1155 | int
| 1156 | afs_sgidaemon(void)
| 1157 | {
| 1158 | int s;
| 1159 | struct dcache *tdc;
| 1160 |
| 1161 | if (afs_sgibklock == NULL((void *)0)) {
| 1162 | SV_INIT(&afs_sgibksync, "bksync", 0, 0);
| 1163 | SV_INIT(&afs_sgibkwait, "bkwait", 0, 0);
| 1164 | SPINLOCK_INIT(&afs_sgibklock, "bklock");
| 1165 | }
| 1166 | s = SPLOCK(afs_sgibklock);
| 1167 | for (;;) {
| 1168 | /* wait for something to do */
| 1169 | SP_WAIT(afs_sgibklock, s, &afs_sgibksync, PINOD((80) + 8));
| 1170 | osi_Assert(afs_sgibklist)(void)((afs_sgibklist) || (osi_AssertFailK( "afs_sgibklist" , "/home/wollman/openafs/src/afs/afs_daemons.c", 1170), 0));
| 1171 |
| 1172 | /* XX will probably need to generalize to real list someday */
/* NOTE(review): SP_WAIT appears to release the spinlock, so it is
 * re-taken here -- confirm against the SGI SP_WAIT semantics. */
| 1173 | s = SPLOCK(afs_sgibklock);
| 1174 | while (afs_sgibklist) {
| 1175 | tdc = afs_sgibklist;
| 1176 | afs_sgibklist = NULL((void *)0);
| 1177 | SPUNLOCK(afs_sgibklock, s);
| 1178 | AFS_GLOCK()do { (void)0; _mtx_lock_flags(((&afs_global_mtx)), (0), "/home/wollman/openafs/src/afs/afs_daemons.c" , 1178); (void)0; } while (0);
| 1179 | tdc->dflags &= ~DFEntryMod0x02;
| 1180 | afs_WriteDCache(tdc, 1);
| 1181 | AFS_GUNLOCK()do { (void)0; _mtx_unlock_flags(((&afs_global_mtx)), (0), "/home/wollman/openafs/src/afs/afs_daemons.c", 1181); } while (0);
| 1182 | s = SPLOCK(afs_sgibklock);
| 1183 | }
| 1184 |
| 1185 | /* done all the work - wake everyone up */
| 1186 | while (SV_SIGNAL(&afs_sgibkwait));
| 1187 | }
| 1188 | }
| 1189 | #endif |