Bug Summary

File: afs/afs_dcache.c
Location: line 2215, column 7
Description: Value stored to 'i' is never read

Annotated Source Code

1/*
2 * Copyright 2000, International Business Machines Corporation and others.
3 * All Rights Reserved.
4 *
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
8 */
9
10/*
11 * Implements:
12 */
13#include <afsconfig.h>
14#include "afs/param.h"
15
16
17#include "afs/sysincludes.h" /*Standard vendor system headers */
18#include "afsincludes.h" /*AFS-based standard headers */
19#include "afs/afs_stats.h" /* statistics */
20#include "afs/afs_cbqueue.h"
21#include "afs/afs_osidnlc.h"
22
23/* Forward declarations. */
24static void afs_GetDownD(int anumber, int *aneedSpace, afs_int32 buckethint);
25static void afs_FreeDiscardedDCache(void);
26static void afs_DiscardDCache(struct dcache *);
27static void afs_FreeDCache(struct dcache *);
28/* For split cache */
29static afs_int32 afs_DCGetBucket(struct vcache *);
30static void afs_DCAdjustSize(struct dcache *, afs_int32, afs_int32);
31static void afs_DCMoveBucket(struct dcache *, afs_int32, afs_int32);
32static void afs_DCSizeInit(void);
33static afs_int32 afs_DCWhichBucket(afs_int32, afs_int32);
34
35/*
36 * --------------------- Exported definitions ---------------------
37 */
38/* For split cache */
39afs_int32 afs_blocksUsed_0; /*!< 1K blocks in cache - in theory is zero */
40afs_int32 afs_blocksUsed_1; /*!< 1K blocks in cache */
41afs_int32 afs_blocksUsed_2; /*!< 1K blocks in cache */
42afs_int32 afs_pct1 = -1;
43afs_int32 afs_pct2 = -1;
44afs_uint32 afs_tpct1 = 0;
45afs_uint32 afs_tpct2 = 0;
46afs_uint32 splitdcache = 0;
47
48afs_lock_t afs_xdcache; /*!< Lock: alloc new disk cache entries */
49afs_int32 afs_freeDCList; /*!< Free list for disk cache entries */
50afs_int32 afs_freeDCCount; /*!< Count of elts in freeDCList */
51afs_int32 afs_discardDCList; /*!< Discarded disk cache entries */
52afs_int32 afs_discardDCCount; /*!< Count of elts in discardDCList */
53struct dcache *afs_freeDSList; /*!< Free list for disk slots */
54struct dcache *afs_Initial_freeDSList; /*!< Initial list for above */
55afs_dcache_id_t cacheInode; /*!< Inode for CacheItems file */
56struct osi_file *afs_cacheInodep = 0; /*!< file for CacheItems inode */
57struct afs_q afs_DLRU; /*!< dcache LRU */
58afs_int32 afs_dhashsize = 1024;
59afs_int32 *afs_dvhashTbl; /*!< Data cache hash table: hashed by FID + chunk number. */
60afs_int32 *afs_dchashTbl; /*!< Data cache hash table: hashed by FID. */
61afs_int32 *afs_dvnextTbl; /*!< Dcache hash table links */
62afs_int32 *afs_dcnextTbl; /*!< Dcache hash table links */
63struct dcache **afs_indexTable; /*!< Pointers to dcache entries */
64afs_hyper_t *afs_indexTimes; /*!< Dcache entry Access times */
65afs_int32 *afs_indexUnique; /*!< dcache entry Fid.Unique */
66unsigned char *afs_indexFlags; /*!< (only one) Is there data there? */
67afs_hyper_t afs_indexCounter; /*!< Fake time for marking index
68 * entries */
69afs_int32 afs_cacheFiles = 0; /*!< Size of afs_indexTable */
70afs_int32 afs_cacheBlocks; /*!< 1K blocks in cache */
71afs_int32 afs_cacheStats; /*!< Stat entries in cache */
72afs_int32 afs_blocksUsed; /*!< Number of blocks in use */
73afs_int32 afs_blocksDiscarded; /*!<Blocks freed but not truncated */
74afs_int32 afs_fsfragsize = AFS_MIN_FRAGSIZE1023; /*!< Underlying Filesystem minimum unit
75 *of disk allocation usually 1K
76 *this value is (truefrag -1 ) to
77 *save a bunch of subtracts... */
78#ifdef AFS_64BIT_CLIENT1
79#ifdef AFS_VM_RDWR_ENV1
80afs_size_t afs_vmMappingEnd; /* !< For large files (>= 2GB) the VM
81 * mapping an 32bit addressing machines
82 * can only be used below the 2 GB
83 * line. From this point upwards we
84 * must do direct I/O into the cache
85 * files. The value should be on a
86 * chunk boundary. */
87#endif /* AFS_VM_RDWR_ENV */
88#endif /* AFS_64BIT_CLIENT */
89
90/* The following is used to ensure that new dcache's aren't obtained when
91 * the cache is nearly full.
92 */
93int afs_WaitForCacheDrain = 0;
94int afs_TruncateDaemonRunning = 0;
95int afs_CacheTooFull = 0;
96
97afs_int32 afs_dcentries; /*!< In-memory dcache entries */
98
99
100int dcacheDisabled = 0;
101
102struct afs_cacheOps afs_UfsCacheOps = {
103#ifndef HAVE_STRUCT_LABEL_SUPPORT1
104 osi_UFSOpen,
105 osi_UFSTruncate,
106 afs_osi_Read,
107 afs_osi_Write,
108 osi_UFSClose,
109 afs_UFSReadUIO,
110 afs_UFSWriteUIO,
111 afs_UFSGetDSlot,
112 afs_UFSGetVolSlot,
113 afs_UFSHandleLink,
114#else
115 .open = osi_UFSOpen,
116 .truncate = osi_UFSTruncate,
117 .fread = afs_osi_Read,
118 .fwrite = afs_osi_Write,
119 .close = osi_UFSClose,
120 .vreadUIO = afs_UFSReadUIO,
121 .vwriteUIO = afs_UFSWriteUIO,
122 .GetDSlot = afs_UFSGetDSlot,
123 .GetVolSlot = afs_UFSGetVolSlot,
124 .HandleLink = afs_UFSHandleLink,
125#endif
126};
127
128struct afs_cacheOps afs_MemCacheOps = {
129#ifndef HAVE_STRUCT_LABEL_SUPPORT1
130 afs_MemCacheOpen,
131 afs_MemCacheTruncate,
132 afs_MemReadBlk,
133 afs_MemWriteBlk,
134 afs_MemCacheClose,
135 afs_MemReadUIO,
136 afs_MemWriteUIO,
137 afs_MemGetDSlot,
138 afs_MemGetVolSlot,
139 afs_MemHandleLink,
140#else
141 .open = afs_MemCacheOpen,
142 .truncate = afs_MemCacheTruncate,
143 .fread = afs_MemReadBlk,
144 .fwrite = afs_MemWriteBlk,
145 .close = afs_MemCacheClose,
146 .vreadUIO = afs_MemReadUIO,
147 .vwriteUIO = afs_MemWriteUIO,
148 .GetDSlot = afs_MemGetDSlot,
149 .GetVolSlot = afs_MemGetVolSlot,
150 .HandleLink = afs_MemHandleLink,
151#endif
152};
153
154int cacheDiskType; /*Type of backing disk for cache */
155struct afs_cacheOps *afs_cacheType;
156
157/*!
158 * Where is this vcache's entry associated dcache located/
159 * \param avc The vcache entry.
160 * \return Bucket index:
161 * 1 : main
162 * 2 : RO
163 */
164static afs_int32
165afs_DCGetBucket(struct vcache *avc)
166{
167 if (!splitdcache)
168 return 1;
169
170 /* This should be replaced with some sort of user configurable function */
171 if (avc->f.states & CRO0x00000004) {
172 return 2;
173 } else if (avc->f.states & CBackup0x00000002) {
174 return 1;
175 } else {
176 /* RW */
177 }
178 /* main bucket */
179 return 1;
180}
181
182/*!
183 * Readjust a dcache's size.
184 *
185 * \param adc The dcache to be adjusted.
186 * \param oldSize Old size for the dcache.
187 * \param newSize The new size to be adjusted to.
188 *
189 */
190static void
191afs_DCAdjustSize(struct dcache *adc, afs_int32 oldSize, afs_int32 newSize)
192{
193 afs_int32 adjustSize = newSize - oldSize;
194
195 if (!splitdcache)
196 return;
197
198 switch (adc->bucket)
199 {
200 case 0:
201 afs_blocksUsed_0 += adjustSize;
202 afs_stats_cmperf.cacheBucket0_Discarded += oldSize;
203 break;
204 case 1:
205 afs_blocksUsed_1 += adjustSize;
206 afs_stats_cmperf.cacheBucket1_Discarded += oldSize;
207 break;
208 case 2:
209 afs_blocksUsed_2 += adjustSize;
210 afs_stats_cmperf.cacheBucket2_Discarded += oldSize;
211 break;
212 }
213
214 return;
215}
216
217/*!
218 * Move a dcache from one bucket to another.
219 *
220 * \param adc Operate on this dcache.
221 * \param size Size in bucket (?).
222 * \param newBucket Destination bucket.
223 *
224 */
225static void
226afs_DCMoveBucket(struct dcache *adc, afs_int32 size, afs_int32 newBucket)
227{
228 if (!splitdcache)
229 return;
230
231 /* Substract size from old bucket. */
232 switch (adc->bucket)
233 {
234 case 0:
235 afs_blocksUsed_0 -= size;
236 break;
237 case 1:
238 afs_blocksUsed_1 -= size;
239 break;
240 case 2:
241 afs_blocksUsed_2 -= size;
242 break;
243 }
244
245 /* Set new bucket and increase destination bucket size. */
246 adc->bucket = newBucket;
247
248 switch (adc->bucket)
249 {
250 case 0:
251 afs_blocksUsed_0 += size;
252 break;
253 case 1:
254 afs_blocksUsed_1 += size;
255 break;
256 case 2:
257 afs_blocksUsed_2 += size;
258 break;
259 }
260
261 return;
262}
263
264/*!
265 * Init split caches size.
266 */
267static void
268afs_DCSizeInit(void)
269{
270 afs_blocksUsed_0 = afs_blocksUsed_1 = afs_blocksUsed_2 = 0;
271}
272
273
274/*!
275 * \param phase
276 * \param bucket
277 */
278static afs_int32
279afs_DCWhichBucket(afs_int32 phase, afs_int32 bucket)
280{
281 if (!splitdcache)
282 return 0;
283
284 afs_pct1 = afs_blocksUsed_1 / (afs_cacheBlocks / 100);
285 afs_pct2 = afs_blocksUsed_2 / (afs_cacheBlocks / 100);
286
287 /* Short cut: if we don't know about it, try to kill it */
288 if (phase < 2 && afs_blocksUsed_0)
289 return 0;
290
291 if (afs_pct1 > afs_tpct1)
292 return 1;
293 if (afs_pct2 > afs_tpct2)
294 return 2;
295 return 0; /* unlikely */
296}
297
298
299/*!
300 * Warn about failing to store a file.
301 *
302 * \param acode Associated error code.
303 * \param avolume Volume involved.
304 * \param aflags How to handle the output:
305 * aflags & 1: Print out on console
306 * aflags & 2: Print out on controlling tty
307 *
308 * \note Environment: Call this from close call when vnodeops is RCS unlocked.
309 */
310
311void
312afs_StoreWarn(afs_int32 acode, afs_int32 avolume,
313 afs_int32 aflags)
314{
315 static char problem_fmt[] =
316 "afs: failed to store file in volume %d (%s)\n";
317 static char problem_fmt_w_error[] =
318 "afs: failed to store file in volume %d (error %d)\n";
319 static char netproblems[] = "network problems";
320 static char partfull[] = "partition full";
321 static char overquota[] = "over quota";
322
323 AFS_STATCNT(afs_StoreWarn)((afs_cmstats.callInfo.C_afs_StoreWarn)++);
324 if (acode < 0) {
325 /*
326 * Network problems
327 */
328 if (aflags & 1)
329 afs_warn(problem_fmt, avolume, netproblems);
330 if (aflags & 2)
331 afs_warnuser(problem_fmt, avolume, netproblems);
332 } else if (acode == ENOSPC28) {
333 /*
334 * Partition full
335 */
336 if (aflags & 1)
337 afs_warn(problem_fmt, avolume, partfull);
338 if (aflags & 2)
339 afs_warnuser(problem_fmt, avolume, partfull);
340 } else
341#ifdef EDQUOT69
342 /* EDQUOT doesn't exist on solaris and won't be sent by the server.
343 * Instead ENOSPC will be sent...
344 */
345 if (acode == EDQUOT69) {
346 /*
347 * Quota exceeded
348 */
349 if (aflags & 1)
350 afs_warn(problem_fmt, avolume, overquota);
351 if (aflags & 2)
352 afs_warnuser(problem_fmt, avolume, overquota);
353 } else
354#endif
355 {
356 /*
357 * Unknown error
358 */
359 if (aflags & 1)
360 afs_warn(problem_fmt_w_error, avolume, acode);
361 if (aflags & 2)
362 afs_warnuser(problem_fmt_w_error, avolume, acode);
363 }
364} /*afs_StoreWarn */
365
366/*!
367 * Try waking up truncation daemon, if it's worth it.
368 */
369void
370afs_MaybeWakeupTruncateDaemon(void)
371{
372 if (!afs_CacheTooFull && afs_CacheIsTooFull()(afs_blocksUsed - afs_blocksDiscarded > ((afs_cacheBlocks &
0xffe00000) ? ((afs_cacheBlocks) / 100 * (95)) : ((95) * (afs_cacheBlocks
) / 100)) || afs_freeDCCount - afs_discardDCCount < ((afs_cacheBlocks
& 0xffe00000) ? ((afs_cacheFiles) / 100 * (100 - 95)) : (
(100 - 95) * (afs_cacheFiles) / 100)))
) {
373 afs_CacheTooFull = 1;
374 if (!afs_TruncateDaemonRunning)
375 afs_osi_Wakeup((int *)afs_CacheTruncateDaemon);
376 } else if (!afs_TruncateDaemonRunning
377 && afs_blocksDiscarded > CM_MAXDISCARDEDCHUNKS16) {
378 afs_osi_Wakeup((int *)afs_CacheTruncateDaemon);
379 }
380}
381
382/*!
383 * /struct CTD_stats
384 *
385 * Keep statistics on run time for afs_CacheTruncateDaemon. This is a
386 * struct so we need only export one symbol for AIX.
387 */
388static struct CTD_stats {
389 osi_timeval_t CTD_beforeSleep;
390 osi_timeval_t CTD_afterSleep;
391 osi_timeval_t CTD_sleepTime;
392 osi_timeval_t CTD_runTime;
393 int CTD_nSleeps;
394} CTD_stats;
395
396u_int afs_min_cache = 0;
397
398/*!
399 * Keeps the cache clean and free by truncating uneeded files, when used.
400 * \param
401 * \return
402 */
403void
404afs_CacheTruncateDaemon(void)
405{
406 osi_timeval_t CTD_tmpTime;
407 u_int counter;
408 u_int cb_lowat;
409 u_int dc_hiwat =
410 PERCENT((100 - CM_DCACHECOUNTFREEPCT + CM_DCACHEEXTRAPCT), afs_cacheFiles)((afs_cacheBlocks & 0xffe00000) ? ((afs_cacheFiles) / 100
* ((100 - 95 + 5))) : (((100 - 95 + 5)) * (afs_cacheFiles) /
100))
;
411 afs_min_cache =
412 (((10 * AFS_CHUNKSIZE(0)((0 < afs_FirstCSize) ? afs_FirstCSize : afs_OtherCSize)) + afs_fsfragsize) & ~afs_fsfragsize) >> 10;
413
414 osi_GetuTime(&CTD_stats.CTD_afterSleep)microtime(&CTD_stats.CTD_afterSleep);
415 afs_TruncateDaemonRunning = 1;
416 while (1) {
417 cb_lowat = PERCENT((CM_DCACHESPACEFREEPCT - CM_DCACHEEXTRAPCT), afs_cacheBlocks)((afs_cacheBlocks & 0xffe00000) ? ((afs_cacheBlocks) / 100
* ((90 - 5))) : (((90 - 5)) * (afs_cacheBlocks) / 100))
;
418 ObtainWriteLock(&afs_xdcache, 266)do { ; if (!(&afs_xdcache)->excl_locked && !(&
afs_xdcache)->readers_reading) (&afs_xdcache) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xdcache, 2); (&afs_xdcache
)->pid_writer = (((__curthread())->td_proc)->p_pid )
; (&afs_xdcache)->src_indicator = 266; } while (0)
;
419 if (afs_CacheTooFull) {
420 int space_needed, slots_needed;
421 /* if we get woken up, we should try to clean something out */
422 for (counter = 0; counter < 10; counter++) {
423 space_needed =
424 afs_blocksUsed - afs_blocksDiscarded - cb_lowat;
425 slots_needed =
426 dc_hiwat - afs_freeDCCount - afs_discardDCCount;
427 afs_GetDownD(slots_needed, &space_needed, 0);
428 if ((space_needed <= 0) && (slots_needed <= 0)) {
429 break;
430 }
431 if (afs_termState == AFSOP_STOP_TRUNCDAEMON213)
432 break;
433 }
434 if (!afs_CacheIsTooFull()(afs_blocksUsed - afs_blocksDiscarded > ((afs_cacheBlocks &
0xffe00000) ? ((afs_cacheBlocks) / 100 * (95)) : ((95) * (afs_cacheBlocks
) / 100)) || afs_freeDCCount - afs_discardDCCount < ((afs_cacheBlocks
& 0xffe00000) ? ((afs_cacheFiles) / 100 * (100 - 95)) : (
(100 - 95) * (afs_cacheFiles) / 100)))
)
435 afs_CacheTooFull = 0;
436 } /* end of cache cleanup */
437 ReleaseWriteLock(&afs_xdcache)do { ; (&afs_xdcache)->excl_locked &= ~2; if ((&
afs_xdcache)->wait_states) Afs_Lock_ReleaseR(&afs_xdcache
); (&afs_xdcache)->pid_writer=0; } while (0)
;
438
439 /*
440 * This is a defensive check to try to avoid starving threads
441 * that may need the global lock so thay can help free some
442 * cache space. If this thread won't be sleeping or truncating
443 * any cache files then give up the global lock so other
444 * threads get a chance to run.
445 */
446 if ((afs_termState != AFSOP_STOP_TRUNCDAEMON213) && afs_CacheTooFull
447 && (!afs_blocksDiscarded || afs_WaitForCacheDrain)) {
448 afs_osi_Wait(100, 0, 0); /* 100 milliseconds */
449 }
450
451 /*
452 * This is where we free the discarded cache elements.
453 */
454 while (afs_blocksDiscarded && !afs_WaitForCacheDrain
455 && (afs_termState != AFSOP_STOP_TRUNCDAEMON213)) {
456 afs_FreeDiscardedDCache();
457 }
458
459 /* See if we need to continue to run. Someone may have
460 * signalled us while we were executing.
461 */
462 if (!afs_WaitForCacheDrain && !afs_CacheTooFull
463 && (afs_termState != AFSOP_STOP_TRUNCDAEMON213)) {
464 /* Collect statistics on truncate daemon. */
465 CTD_stats.CTD_nSleeps++;
466 osi_GetuTime(&CTD_stats.CTD_beforeSleep)microtime(&CTD_stats.CTD_beforeSleep);
467 afs_stats_GetDiff(CTD_tmpTime, CTD_stats.CTD_afterSleep,{ if (CTD_stats.CTD_beforeSleep.tv_usec < CTD_stats.CTD_afterSleep
.tv_usec) { CTD_stats.CTD_beforeSleep.tv_usec += 1000000; CTD_stats
.CTD_beforeSleep.tv_sec -= 1; } CTD_tmpTime.tv_sec = CTD_stats
.CTD_beforeSleep.tv_sec - CTD_stats.CTD_afterSleep.tv_sec; CTD_tmpTime
.tv_usec = CTD_stats.CTD_beforeSleep.tv_usec - CTD_stats.CTD_afterSleep
.tv_usec; }
468 CTD_stats.CTD_beforeSleep){ if (CTD_stats.CTD_beforeSleep.tv_usec < CTD_stats.CTD_afterSleep
.tv_usec) { CTD_stats.CTD_beforeSleep.tv_usec += 1000000; CTD_stats
.CTD_beforeSleep.tv_sec -= 1; } CTD_tmpTime.tv_sec = CTD_stats
.CTD_beforeSleep.tv_sec - CTD_stats.CTD_afterSleep.tv_sec; CTD_tmpTime
.tv_usec = CTD_stats.CTD_beforeSleep.tv_usec - CTD_stats.CTD_afterSleep
.tv_usec; }
;
469 afs_stats_AddTo(CTD_stats.CTD_runTime, CTD_tmpTime){ CTD_stats.CTD_runTime.tv_sec += CTD_tmpTime.tv_sec; CTD_stats
.CTD_runTime.tv_usec += CTD_tmpTime.tv_usec; if (CTD_stats.CTD_runTime
.tv_usec > 1000000) { CTD_stats.CTD_runTime.tv_usec -= 1000000
; CTD_stats.CTD_runTime.tv_sec++; } }
;
470
471 afs_TruncateDaemonRunning = 0;
472 afs_osi_Sleep((int *)afs_CacheTruncateDaemon);
473 afs_TruncateDaemonRunning = 1;
474
475 osi_GetuTime(&CTD_stats.CTD_afterSleep)microtime(&CTD_stats.CTD_afterSleep);
476 afs_stats_GetDiff(CTD_tmpTime, CTD_stats.CTD_beforeSleep,{ if (CTD_stats.CTD_afterSleep.tv_usec < CTD_stats.CTD_beforeSleep
.tv_usec) { CTD_stats.CTD_afterSleep.tv_usec += 1000000; CTD_stats
.CTD_afterSleep.tv_sec -= 1; } CTD_tmpTime.tv_sec = CTD_stats
.CTD_afterSleep.tv_sec - CTD_stats.CTD_beforeSleep.tv_sec; CTD_tmpTime
.tv_usec = CTD_stats.CTD_afterSleep.tv_usec - CTD_stats.CTD_beforeSleep
.tv_usec; }
477 CTD_stats.CTD_afterSleep){ if (CTD_stats.CTD_afterSleep.tv_usec < CTD_stats.CTD_beforeSleep
.tv_usec) { CTD_stats.CTD_afterSleep.tv_usec += 1000000; CTD_stats
.CTD_afterSleep.tv_sec -= 1; } CTD_tmpTime.tv_sec = CTD_stats
.CTD_afterSleep.tv_sec - CTD_stats.CTD_beforeSleep.tv_sec; CTD_tmpTime
.tv_usec = CTD_stats.CTD_afterSleep.tv_usec - CTD_stats.CTD_beforeSleep
.tv_usec; }
;
478 afs_stats_AddTo(CTD_stats.CTD_sleepTime, CTD_tmpTime){ CTD_stats.CTD_sleepTime.tv_sec += CTD_tmpTime.tv_sec; CTD_stats
.CTD_sleepTime.tv_usec += CTD_tmpTime.tv_usec; if (CTD_stats.
CTD_sleepTime.tv_usec > 1000000) { CTD_stats.CTD_sleepTime
.tv_usec -= 1000000; CTD_stats.CTD_sleepTime.tv_sec++; } }
;
479 }
480 if (afs_termState == AFSOP_STOP_TRUNCDAEMON213) {
481 afs_termState = AFSOP_STOP_AFSDB218;
482 afs_osi_Wakeup(&afs_termState);
483 break;
484 }
485 }
486}
487
488
489/*!
490 * Make adjustment for the new size in the disk cache entry
491 *
492 * \note Major Assumptions Here:
493 * Assumes that frag size is an integral power of two, less one,
494 * and that this is a two's complement machine. I don't
495 * know of any filesystems which violate this assumption...
496 *
497 * \param adc Ptr to dcache entry.
498 * \param anewsize New size desired.
499 *
500 */
501
502void
503afs_AdjustSize(struct dcache *adc, afs_int32 newSize)
504{
505 afs_int32 oldSize;
506
507 AFS_STATCNT(afs_AdjustSize)((afs_cmstats.callInfo.C_afs_AdjustSize)++);
508
509 adc->dflags |= DFEntryMod0x02;
510 oldSize = ((adc->f.chunkBytes + afs_fsfragsize) ^ afs_fsfragsize) >> 10; /* round up */
511 adc->f.chunkBytes = newSize;
512 if (!newSize)
513 adc->validPos = 0;
514 newSize = ((newSize + afs_fsfragsize) ^ afs_fsfragsize) >> 10; /* round up */
515 afs_DCAdjustSize(adc, oldSize, newSize);
516 if ((newSize > oldSize) && !AFS_IS_DISCONNECTED(afs_is_disconnected)) {
517
518 /* We're growing the file, wakeup the daemon */
519 afs_MaybeWakeupTruncateDaemon();
520 }
521 afs_blocksUsed += (newSize - oldSize);
522 afs_stats_cmperf.cacheBlocksInUse = afs_blocksUsed; /* XXX */
523}
524
525
526/*!
527 * This routine is responsible for moving at least one entry (but up
528 * to some number of them) from the LRU queue to the free queue.
529 *
530 * \param anumber Number of entries that should ideally be moved.
531 * \param aneedSpace How much space we need (1K blocks);
532 *
533 * \note Environment:
534 * The anumber parameter is just a hint; at least one entry MUST be
535 * moved, or we'll panic. We must be called with afs_xdcache
536 * write-locked. We should try to satisfy both anumber and aneedspace,
537 * whichever is more demanding - need to do several things:
538 * 1. only grab up to anumber victims if aneedSpace <= 0, not
539 * the whole set of MAXATONCE.
540 * 2. dynamically choose MAXATONCE to reflect severity of
541 * demand: something like (*aneedSpace >> (logChunk - 9))
542 *
543 * \note N.B. if we're called with aneedSpace <= 0 and anumber > 0, that
544 * indicates that the cache is not properly configured/tuned or
545 * something. We should be able to automatically correct that problem.
546 */
547
548#define MAXATONCE16 16 /* max we can obtain at once */
549static void
550afs_GetDownD(int anumber, int *aneedSpace, afs_int32 buckethint)
551{
552
553 struct dcache *tdc;
554 struct VenusFid *afid;
555 afs_int32 i, j;
556 afs_hyper_t vtime;
557 int skip, phase;
558 struct vcache *tvc;
559 afs_uint32 victims[MAXATONCE16];
560 struct dcache *victimDCs[MAXATONCE16];
561 afs_hyper_t victimTimes[MAXATONCE16]; /* youngest (largest LRU time) first */
562 afs_uint32 victimPtr; /* next free item in victim arrays */
563 afs_hyper_t maxVictimTime; /* youngest (largest LRU time) victim */
564 afs_uint32 maxVictimPtr; /* where it is */
565 int discard;
566 int curbucket;
567
568 AFS_STATCNT(afs_GetDownD)((afs_cmstats.callInfo.C_afs_GetDownD)++);
569
570 if (CheckLock(&afs_xdcache)((&afs_xdcache)->excl_locked? (int) -1 : (int) (&afs_xdcache
)->readers_reading)
!= -1)
571 osi_Panic("getdownd nolock");
572 /* decrement anumber first for all dudes in free list */
573 /* SHOULD always decrement anumber first, even if aneedSpace >0,
574 * because we should try to free space even if anumber <=0 */
575 if (!aneedSpace || *aneedSpace <= 0) {
576 anumber -= afs_freeDCCount;
577 if (anumber <= 0) {
578 return; /* enough already free */
579 }
580 }
581
582 /* bounds check parameter */
583 if (anumber > MAXATONCE16)
584 anumber = MAXATONCE16; /* all we can do */
585
586 /* rewrite so phases include a better eligiblity for gc test*/
587 /*
588 * The phase variable manages reclaims. Set to 0, the first pass,
589 * we don't reclaim active entries, or other than target bucket.
590 * Set to 1, we reclaim even active ones in target bucket.
591 * Set to 2, we reclaim any inactive one.
592 * Set to 3, we reclaim even active ones. On Solaris, we also reclaim
593 * entries whose corresponding vcache has a nonempty multiPage list, when
594 * possible.
595 */
596 if (splitdcache) {
597 phase = 0;
598 } else {
599 phase = 4;
600 }
601
602 for (i = 0; i < afs_cacheFiles; i++)
603 /* turn off all flags */
604 afs_indexFlags[i] &= ~IFFlag8;
605
606 while (anumber > 0 || (aneedSpace && *aneedSpace > 0)) {
607 /* find oldest entries for reclamation */
608 maxVictimPtr = victimPtr = 0;
609 hzero(maxVictimTime)((maxVictimTime).low = 0, (maxVictimTime).high = 0);
610 curbucket = afs_DCWhichBucket(phase, buckethint);
611 /* select victims from access time array */
612 for (i = 0; i < afs_cacheFiles; i++) {
613 if (afs_indexFlags[i] & (IFDataMod4 | IFFree2 | IFDiscarded64)) {
614 /* skip if dirty or already free */
615 continue;
616 }
617 tdc = afs_indexTable[i];
618 if (tdc && (curbucket != tdc->bucket) && (phase < 4))
619 {
620 /* Wrong bucket; can't use it! */
621 continue;
622 }
623 if (tdc && (tdc->refCount != 0)) {
624 /* Referenced; can't use it! */
625 continue;
626 }
627 hset(vtime, afs_indexTimes[i])((vtime) = (afs_indexTimes[i]));
628
629 /* if we've already looked at this one, skip it */
630 if (afs_indexFlags[i] & IFFlag8)
631 continue;
632
633 if (victimPtr < MAXATONCE16) {
634 /* if there's at least one free victim slot left */
635 victims[victimPtr] = i;
636 hset(victimTimes[victimPtr], vtime)((victimTimes[victimPtr]) = (vtime));
637 if (hcmp(vtime, maxVictimTime)((vtime).high<(maxVictimTime).high? -1 : ((vtime).high >
(maxVictimTime).high? 1 : ((vtime).low <(maxVictimTime).low
? -1 : ((vtime).low > (maxVictimTime).low? 1 : 0))))
> 0) {
638 hset(maxVictimTime, vtime)((maxVictimTime) = (vtime));
639 maxVictimPtr = victimPtr;
640 }
641 victimPtr++;
642 } else if (hcmp(vtime, maxVictimTime)((vtime).high<(maxVictimTime).high? -1 : ((vtime).high >
(maxVictimTime).high? 1 : ((vtime).low <(maxVictimTime).low
? -1 : ((vtime).low > (maxVictimTime).low? 1 : 0))))
< 0) {
643 /*
644 * We're older than youngest victim, so we replace at
645 * least one victim
646 */
647 /* find youngest (largest LRU) victim */
648 j = maxVictimPtr;
649 if (j == victimPtr)
650 osi_Panic("getdownd local");
651 victims[j] = i;
652 hset(victimTimes[j], vtime)((victimTimes[j]) = (vtime));
653 /* recompute maxVictimTime */
654 hset(maxVictimTime, vtime)((maxVictimTime) = (vtime));
655 for (j = 0; j < victimPtr; j++)
656 if (hcmp(maxVictimTime, victimTimes[j])((maxVictimTime).high<(victimTimes[j]).high? -1 : ((maxVictimTime
).high > (victimTimes[j]).high? 1 : ((maxVictimTime).low <
(victimTimes[j]).low? -1 : ((maxVictimTime).low > (victimTimes
[j]).low? 1 : 0))))
< 0) {
657 hset(maxVictimTime, victimTimes[j])((maxVictimTime) = (victimTimes[j]));
658 maxVictimPtr = j;
659 }
660 }
661 } /* big for loop */
662
663 /* now really reclaim the victims */
664 j = 0; /* flag to track if we actually got any of the victims */
665 /* first, hold all the victims, since we're going to release the lock
666 * during the truncate operation.
667 */
668 for (i = 0; i < victimPtr; i++) {
669 tdc = afs_GetDSlot(victims[i], 0)(*(afs_cacheType->GetDSlot))(victims[i], 0);
670 /* We got tdc->tlock(R) here */
671 if (tdc->refCount == 1)
672 victimDCs[i] = tdc;
673 else
674 victimDCs[i] = 0;
675 ReleaseReadLock(&tdc->tlock)do { ; if (!(--((&tdc->tlock)->readers_reading)) &&
(&tdc->tlock)->wait_states) Afs_Lock_ReleaseW(&
tdc->tlock) ; if ( (&tdc->tlock)->pid_last_reader
== (((__curthread())->td_proc)->p_pid ) ) (&tdc->
tlock)->pid_last_reader =0; } while (0)
;
676 if (!victimDCs[i])
677 afs_PutDCache(tdc);
678 }
679 for (i = 0; i < victimPtr; i++) {
680 /* q is first elt in dcache entry */
681 tdc = victimDCs[i];
682 /* now, since we're dropping the afs_xdcache lock below, we
683 * have to verify, before proceeding, that there are no other
684 * references to this dcache entry, even now. Note that we
685 * compare with 1, since we bumped it above when we called
686 * afs_GetDSlot to preserve the entry's identity.
687 */
688 if (tdc && tdc->refCount == 1) {
689 unsigned char chunkFlags;
690 afs_size_t tchunkoffset = 0;
691 afid = &tdc->f.fid;
692 /* xdcache is lower than the xvcache lock */
693 ReleaseWriteLock(&afs_xdcache)do { ; (&afs_xdcache)->excl_locked &= ~2; if ((&
afs_xdcache)->wait_states) Afs_Lock_ReleaseR(&afs_xdcache
); (&afs_xdcache)->pid_writer=0; } while (0)
;
694 ObtainReadLock(&afs_xvcache)do { ; if (!((&afs_xvcache)->excl_locked & 2)) ((&
afs_xvcache)->readers_reading)++; else Afs_Lock_Obtain(&
afs_xvcache, 1); (&afs_xvcache)->pid_last_reader = (((
__curthread())->td_proc)->p_pid ); } while (0)
;
695 tvc = afs_FindVCache(afid, 0, 0 /* no stats, no vlru */ );
696 ReleaseReadLock(&afs_xvcache)do { ; if (!(--((&afs_xvcache)->readers_reading)) &&
(&afs_xvcache)->wait_states) Afs_Lock_ReleaseW(&afs_xvcache
) ; if ( (&afs_xvcache)->pid_last_reader == (((__curthread
())->td_proc)->p_pid ) ) (&afs_xvcache)->pid_last_reader
=0; } while (0)
;
697 ObtainWriteLock(&afs_xdcache, 527)do { ; if (!(&afs_xdcache)->excl_locked && !(&
afs_xdcache)->readers_reading) (&afs_xdcache) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xdcache, 2); (&afs_xdcache
)->pid_writer = (((__curthread())->td_proc)->p_pid )
; (&afs_xdcache)->src_indicator = 527; } while (0)
;
698 skip = 0;
699 if (tdc->refCount > 1)
700 skip = 1;
701 if (tvc) {
702 tchunkoffset = AFS_CHUNKTOBASE(tdc->f.chunk)((tdc->f.chunk == 0) ? 0 : ((afs_size_t) afs_FirstCSize + (
(afs_size_t) (tdc->f.chunk - 1) << afs_LogChunk)))
;
703 chunkFlags = afs_indexFlags[tdc->index];
704 if (((phase & 1) == 0) && osi_Active(tvc))
705 skip = 1;
706 if (((phase & 1) == 1) && osi_Active(tvc)
707 && (tvc->f.states & CDCLock0x02000000)
708 && (chunkFlags & IFAnyPages32))
709 skip = 1;
710 if (chunkFlags & IFDataMod4)
711 skip = 1;
712 afs_Trace4(afs_iclSetp, CM_TRACE_GETDOWND,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087753L), (1<<24)+((2)
<<18)+((7)<<12)+((7)<<6)+(8), (long)(tvc), (
long)(skip), (long)(tdc->index), (long)((&tchunkoffset
))) : 0)
713 ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32, skip,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087753L), (1<<24)+((2)
<<18)+((7)<<12)+((7)<<6)+(8), (long)(tvc), (
long)(skip), (long)(tdc->index), (long)((&tchunkoffset
))) : 0)
714 ICL_TYPE_INT32, tdc->index, ICL_TYPE_OFFSET,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087753L), (1<<24)+((2)
<<18)+((7)<<12)+((7)<<6)+(8), (long)(tvc), (
long)(skip), (long)(tdc->index), (long)((&tchunkoffset
))) : 0)
715 ICL_HANDLE_OFFSET(tchunkoffset))(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087753L), (1<<24)+((2)
<<18)+((7)<<12)+((7)<<6)+(8), (long)(tvc), (
long)(skip), (long)(tdc->index), (long)((&tchunkoffset
))) : 0)
;
716
717#if defined(AFS_SUN5_ENV)
718 /*
719 * Now we try to invalidate pages. We do this only for
720 * Solaris. For other platforms, it's OK to recycle a
721 * dcache entry out from under a page, because the strategy
722 * function can call afs_GetDCache().
723 */
724 if (!skip && (chunkFlags & IFAnyPages32)) {
725 int code;
726
727 ReleaseWriteLock(&afs_xdcache)do { ; (&afs_xdcache)->excl_locked &= ~2; if ((&
afs_xdcache)->wait_states) Afs_Lock_ReleaseR(&afs_xdcache
); (&afs_xdcache)->pid_writer=0; } while (0)
;
728 ObtainWriteLock(&tvc->vlock, 543)do { ; if (!(&tvc->vlock)->excl_locked && !
(&tvc->vlock)->readers_reading) (&tvc->vlock
) -> excl_locked = 2; else Afs_Lock_Obtain(&tvc->vlock
, 2); (&tvc->vlock)->pid_writer = (((__curthread())
->td_proc)->p_pid ); (&tvc->vlock)->src_indicator
= 543; } while (0)
;
729 if (!QEmpty(&tvc->multiPage)((&tvc->multiPage)->prev == (&tvc->multiPage
))
) {
730 if (phase < 3 || osi_VM_MultiPageConflict(tvc, tdc)) {
731 skip = 1;
732 goto endmultipage;
733 }
734 }
735 /* block locking pages */
736 tvc->vstates |= VPageCleaning0x2;
737 /* block getting new pages */
738 tvc->activeV++;
739 ReleaseWriteLock(&tvc->vlock)do { ; (&tvc->vlock)->excl_locked &= ~2; if ((&
tvc->vlock)->wait_states) Afs_Lock_ReleaseR(&tvc->
vlock); (&tvc->vlock)->pid_writer=0; } while (0)
;
740 /* One last recheck */
741 ObtainWriteLock(&afs_xdcache, 333)do { ; if (!(&afs_xdcache)->excl_locked && !(&
afs_xdcache)->readers_reading) (&afs_xdcache) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xdcache, 2); (&afs_xdcache
)->pid_writer = (((__curthread())->td_proc)->p_pid )
; (&afs_xdcache)->src_indicator = 333; } while (0)
;
742 chunkFlags = afs_indexFlags[tdc->index];
743 if (tdc->refCount > 1 || (chunkFlags & IFDataMod4)
744 || (osi_Active(tvc) && (tvc->f.states & CDCLock0x02000000)
745 && (chunkFlags & IFAnyPages32))) {
746 skip = 1;
747 ReleaseWriteLock(&afs_xdcache)do { ; (&afs_xdcache)->excl_locked &= ~2; if ((&
afs_xdcache)->wait_states) Afs_Lock_ReleaseR(&afs_xdcache
); (&afs_xdcache)->pid_writer=0; } while (0)
;
748 goto endputpage;
749 }
750 ReleaseWriteLock(&afs_xdcache)do { ; (&afs_xdcache)->excl_locked &= ~2; if ((&
afs_xdcache)->wait_states) Afs_Lock_ReleaseR(&afs_xdcache
); (&afs_xdcache)->pid_writer=0; } while (0)
;
751
752 code = osi_VM_GetDownD(tvc, tdc);
753
754 ObtainWriteLock(&afs_xdcache, 269)do { ; if (!(&afs_xdcache)->excl_locked && !(&
afs_xdcache)->readers_reading) (&afs_xdcache) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xdcache, 2); (&afs_xdcache
)->pid_writer = (((__curthread())->td_proc)->p_pid )
; (&afs_xdcache)->src_indicator = 269; } while (0)
;
755 /* we actually removed all pages, clean and dirty */
756 if (code == 0) {
757 afs_indexFlags[tdc->index] &=
758 ~(IFDirtyPages16 | IFAnyPages32);
759 } else
760 skip = 1;
761 ReleaseWriteLock(&afs_xdcache)do { ; (&afs_xdcache)->excl_locked &= ~2; if ((&
afs_xdcache)->wait_states) Afs_Lock_ReleaseR(&afs_xdcache
); (&afs_xdcache)->pid_writer=0; } while (0)
;
762 endputpage:
763 ObtainWriteLock(&tvc->vlock, 544)do { ; if (!(&tvc->vlock)->excl_locked && !
(&tvc->vlock)->readers_reading) (&tvc->vlock
) -> excl_locked = 2; else Afs_Lock_Obtain(&tvc->vlock
, 2); (&tvc->vlock)->pid_writer = (((__curthread())
->td_proc)->p_pid ); (&tvc->vlock)->src_indicator
= 544; } while (0)
;
764 if (--tvc->activeV == 0
765 && (tvc->vstates & VRevokeWait0x1)) {
766 tvc->vstates &= ~VRevokeWait0x1;
767 afs_osi_Wakeup((char *)&tvc->vstates);
768
769 }
770 if (tvc->vstates & VPageCleaning0x2) {
771 tvc->vstates &= ~VPageCleaning0x2;
772 afs_osi_Wakeup((char *)&tvc->vstates);
773 }
774 endmultipage:
775 ReleaseWriteLock(&tvc->vlock)do { ; (&tvc->vlock)->excl_locked &= ~2; if ((&
tvc->vlock)->wait_states) Afs_Lock_ReleaseR(&tvc->
vlock); (&tvc->vlock)->pid_writer=0; } while (0)
;
776 } else
777#endif /* AFS_SUN5_ENV */
778 {
779 ReleaseWriteLock(&afs_xdcache)do { ; (&afs_xdcache)->excl_locked &= ~2; if ((&
afs_xdcache)->wait_states) Afs_Lock_ReleaseR(&afs_xdcache
); (&afs_xdcache)->pid_writer=0; } while (0)
;
780 }
781
782 afs_PutVCache(tvc); /*XXX was AFS_FAST_RELE?*/
783 ObtainWriteLock(&afs_xdcache, 528)do { ; if (!(&afs_xdcache)->excl_locked && !(&
afs_xdcache)->readers_reading) (&afs_xdcache) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xdcache, 2); (&afs_xdcache
)->pid_writer = (((__curthread())->td_proc)->p_pid )
; (&afs_xdcache)->src_indicator = 528; } while (0)
;
784 if (afs_indexFlags[tdc->index] &
785 (IFDataMod4 | IFDirtyPages16 | IFAnyPages32))
786 skip = 1;
787 if (tdc->refCount > 1)
788 skip = 1;
789 }
790#if defined(AFS_SUN5_ENV)
791 else {
792 /* no vnode, so IFDirtyPages is spurious (we don't
793 * sweep dcaches on vnode recycling, so we can have
794 * DIRTYPAGES set even when all pages are gone). Just
795 * clear the flag.
796 * Hold vcache lock to prevent vnode from being
797 * created while we're clearing IFDirtyPages.
798 */
799 afs_indexFlags[tdc->index] &=
800 ~(IFDirtyPages16 | IFAnyPages32);
801 }
802#endif
803 if (skip) {
804 /* skip this guy and mark him as recently used */
805 afs_indexFlags[tdc->index] |= IFFlag8;
806 afs_Trace4(afs_iclSetp, CM_TRACE_GETDOWND,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087753L), (1<<24)+((2)
<<18)+((7)<<12)+((7)<<6)+(8), (long)(tvc), (
long)(2), (long)(tdc->index), (long)((&tchunkoffset)))
: 0)
807 ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32, 2,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087753L), (1<<24)+((2)
<<18)+((7)<<12)+((7)<<6)+(8), (long)(tvc), (
long)(2), (long)(tdc->index), (long)((&tchunkoffset)))
: 0)
808 ICL_TYPE_INT32, tdc->index, ICL_TYPE_OFFSET,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087753L), (1<<24)+((2)
<<18)+((7)<<12)+((7)<<6)+(8), (long)(tvc), (
long)(2), (long)(tdc->index), (long)((&tchunkoffset)))
: 0)
809 ICL_HANDLE_OFFSET(tchunkoffset))(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087753L), (1<<24)+((2)
<<18)+((7)<<12)+((7)<<6)+(8), (long)(tvc), (
long)(2), (long)(tdc->index), (long)((&tchunkoffset)))
: 0)
;
810 } else {
811 /* flush this dude from the data cache and reclaim;
812 * first, make sure no one will care that we damage
813 * it, by removing it from all hash tables. Then,
814 * melt it down for parts. Note that any concurrent
815 * (new possibility!) calls to GetDownD won't touch
816 * this guy because his reference count is > 0. */
817 afs_Trace4(afs_iclSetp, CM_TRACE_GETDOWND,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087753L), (1<<24)+((2)
<<18)+((7)<<12)+((7)<<6)+(8), (long)(tvc), (
long)(3), (long)(tdc->index), (long)((&tchunkoffset)))
: 0)
818 ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32, 3,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087753L), (1<<24)+((2)
<<18)+((7)<<12)+((7)<<6)+(8), (long)(tvc), (
long)(3), (long)(tdc->index), (long)((&tchunkoffset)))
: 0)
819 ICL_TYPE_INT32, tdc->index, ICL_TYPE_OFFSET,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087753L), (1<<24)+((2)
<<18)+((7)<<12)+((7)<<6)+(8), (long)(tvc), (
long)(3), (long)(tdc->index), (long)((&tchunkoffset)))
: 0)
820 ICL_HANDLE_OFFSET(tchunkoffset))(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087753L), (1<<24)+((2)
<<18)+((7)<<12)+((7)<<6)+(8), (long)(tvc), (
long)(3), (long)(tdc->index), (long)((&tchunkoffset)))
: 0)
;
821 AFS_STATCNT(afs_gget)((afs_cmstats.callInfo.C_afs_gget)++);
822 afs_HashOutDCache(tdc, 1);
823 if (tdc->f.chunkBytes != 0) {
824 discard = 1;
825 if (aneedSpace)
826 *aneedSpace -=
827 (tdc->f.chunkBytes + afs_fsfragsize) >> 10;
828 } else {
829 discard = 0;
830 }
831 if (discard) {
832 afs_DiscardDCache(tdc);
833 } else {
834 afs_FreeDCache(tdc);
835 }
836 anumber--;
837 j = 1; /* we reclaimed at least one victim */
838 }
839 }
840 afs_PutDCache(tdc);
841 } /* end of for victims loop */
842
843 if (phase < 5) {
844 /* Phase is 0 and no one was found, so try phase 1 (ignore
845 * osi_Active flag) */
846 if (j == 0) {
847 phase++;
848 for (i = 0; i < afs_cacheFiles; i++)
849 /* turn off all flags */
850 afs_indexFlags[i] &= ~IFFlag8;
851 }
852 } else {
853 /* found no one in phases 0-5, we're hosed */
854 if (victimPtr == 0)
855 break;
856 }
857 } /* big while loop */
858
859 return;
860
861} /*afs_GetDownD */
862
863
864/*!
865 * Remove adc from any hash tables that would allow it to be located
866 * again by afs_FindDCache or afs_GetDCache.
867 *
868 * \param adc Pointer to dcache entry to remove from hash tables.
869 *
870 * \note Locks: Must have the afs_xdcache lock write-locked to call this function.
871 *
872 */
int
afs_HashOutDCache(struct dcache *adc, int zap)
{
    int i, us;

    AFS_STATCNT(afs_glink);
    if (zap)
	/* we know this guy's in the LRUQ.  We'll move dude into DCQ below */
	DZap(adc);
    /* if this guy is in the hash table, pull him out */
    if (adc->f.fid.Fid.Volume != 0) {
	/* remove entry from first hash chains */
	i = DCHash(&adc->f.fid, adc->f.chunk);
	us = afs_dchashTbl[i];
	if (us == adc->index) {
	    /* first dude in the list */
	    afs_dchashTbl[i] = afs_dcnextTbl[adc->index];
	} else {
	    /* somewhere on the chain */
	    while (us != NULLIDX) {
		if (afs_dcnextTbl[us] == adc->index) {
		    /* found item pointing at the one to delete */
		    afs_dcnextTbl[us] = afs_dcnextTbl[adc->index];
		    break;
		}
		us = afs_dcnextTbl[us];
	    }
	    /* entry was supposed to be on this chain; if we fell off the
	     * end the hash tables are corrupt, so panic rather than
	     * continue with inconsistent state. */
	    if (us == NULLIDX)
		osi_Panic("dcache hc");
	}
	/* remove entry from *other* hash chain (the per-volume/vnode one) */
	i = DVHash(&adc->f.fid);
	us = afs_dvhashTbl[i];
	if (us == adc->index) {
	    /* first dude in the list */
	    afs_dvhashTbl[i] = afs_dvnextTbl[adc->index];
	} else {
	    /* somewhere on the chain */
	    while (us != NULLIDX) {
		if (afs_dvnextTbl[us] == adc->index) {
		    /* found item pointing at the one to delete */
		    afs_dvnextTbl[us] = afs_dvnextTbl[adc->index];
		    break;
		}
		us = afs_dvnextTbl[us];
	    }
	    if (us == NULLIDX)
		osi_Panic("dcache hv");
	}
    }

    if (zap) {
	/* prevent entry from being found on a reboot (it is already out of
	 * the hash table, but after a crash, we just look at fid fields of
	 * stable (old) entries).
	 */
	adc->f.fid.Fid.Volume = 0;	/* invalid */

	/* mark entry as modified so it gets written back to disk */
	adc->dflags |= DFEntryMod;
    }

    /* all done */
    return 0;
}				/*afs_HashOutDCache */
938
939/*!
940 * Flush the given dcache entry, pulling it from hash chains
941 * and truncating the associated cache file.
942 *
943 * \param adc Ptr to dcache entry to flush.
944 *
945 * \note Environment:
946 * This routine must be called with the afs_xdcache lock held
947 * (in write mode).
948 */
void
afs_FlushDCache(struct dcache *adc)
{
    AFS_STATCNT(afs_FlushDCache);
    /*
     * Bump the number of cache files flushed.
     */
    afs_stats_cmperf.cacheFlushes++;

    /* remove from all hash tables */
    afs_HashOutDCache(adc, 1);

    /* Free its space; special case null operation, since truncate operation
     * in UFS is slow even in this case, and this allows us to pre-truncate
     * these files at more convenient times with fewer locks set
     * (see afs_GetDownD).
     */
    if (adc->f.chunkBytes != 0) {
	/* non-empty chunk: defer the truncate to the truncate daemon */
	afs_DiscardDCache(adc);
	afs_MaybeWakeupTruncateDaemon();
    } else {
	/* already empty: can go straight to the free list */
	afs_FreeDCache(adc);
    }

    /* if someone is blocked waiting for the cache to drain, wake them
     * once usage has dropped below the drain threshold */
    if (afs_WaitForCacheDrain) {
	if (afs_blocksUsed <=
	    PERCENT(CM_CACHESIZEDRAINEDPCT, afs_cacheBlocks)) {
	    afs_WaitForCacheDrain = 0;
	    afs_osi_Wakeup(&afs_WaitForCacheDrain);
	}
    }
}				/*afs_FlushDCache */
981
982
983/*!
984 * Put a dcache entry on the free dcache entry list.
985 *
986 * \param adc dcache entry to free.
987 *
988 * \note Environment: called with afs_xdcache lock write-locked.
989 */
static void
afs_FreeDCache(struct dcache *adc)
{
    /* Thread on free list, update free list count and mark entry as
     * freed in its indexFlags element.  Also, ensure DCache entry gets
     * written out (set DFEntryMod).
     */

    /* the dvnext table doubles as the free-list link for free entries */
    afs_dvnextTbl[adc->index] = afs_freeDCList;
    afs_freeDCList = adc->index;
    afs_freeDCCount++;
    afs_indexFlags[adc->index] |= IFFree;
    adc->dflags |= DFEntryMod;

    /* discarded blocks still occupy disk space, so subtract them when
     * deciding whether the cache has drained enough to wake waiters */
    if (afs_WaitForCacheDrain) {
	if ((afs_blocksUsed - afs_blocksDiscarded) <=
	    PERCENT(CM_CACHESIZEDRAINEDPCT, afs_cacheBlocks)) {
	    afs_WaitForCacheDrain = 0;
	    afs_osi_Wakeup(&afs_WaitForCacheDrain);
	}
    }
}				/* afs_FreeDCache */
1012
1013/*!
1014 * Discard the cache element by moving it to the discardDCList.
1015 * This puts the cache element into a quasi-freed state, where
1016 * the space may be reused, but the file has not been truncated.
1017 *
1018 * \note Major Assumptions Here:
1019 * Assumes that frag size is an integral power of two, less one,
1020 * and that this is a two's complement machine. I don't
1021 * know of any filesystems which violate this assumption...
1022 *
1023 * \param adr Ptr to dcache entry.
1024 *
1025 * \note Environment:
1026 * Must be called with afs_xdcache write-locked.
1027 */
1028
static void
afs_DiscardDCache(struct dcache *adc)
{
    afs_int32 size;

    AFS_STATCNT(afs_DiscardDCache);

    osi_Assert(adc->refCount == 1);

    /* round chunkBytes up to a fragment boundary, then convert to 1K
     * blocks; relies on afs_fsfragsize being 2^n - 1 (see header note) */
    size = ((adc->f.chunkBytes + afs_fsfragsize) ^ afs_fsfragsize) >> 10;	/* round up */
    afs_blocksDiscarded += size;
    afs_stats_cmperf.cacheBlocksDiscarded = afs_blocksDiscarded;

    /* thread onto the discard list via the dvnext table links */
    afs_dvnextTbl[adc->index] = afs_discardDCList;
    afs_discardDCList = adc->index;
    afs_discardDCCount++;

    /* invalidate the fid so the entry can't be matched after a crash,
     * and mark it modified so the change reaches disk */
    adc->f.fid.Fid.Volume = 0;
    adc->dflags |= DFEntryMod;
    afs_indexFlags[adc->index] |= IFDiscarded;

    /* wake cache-drain waiters once net usage drops below threshold */
    if (afs_WaitForCacheDrain) {
	if ((afs_blocksUsed - afs_blocksDiscarded) <=
	    PERCENT(CM_CACHESIZEDRAINEDPCT, afs_cacheBlocks)) {
	    afs_WaitForCacheDrain = 0;
	    afs_osi_Wakeup(&afs_WaitForCacheDrain);
	}
    }

}				/*afs_DiscardDCache */
1059
1060/*!
1061 * Free the next element on the list of discarded cache elements.
1062 */
static void
afs_FreeDiscardedDCache(void)
{
    struct dcache *tdc;
    struct osi_file *tfile;
    afs_int32 size;

    AFS_STATCNT(afs_FreeDiscardedDCache);

    ObtainWriteLock(&afs_xdcache, 510);
    if (!afs_blocksDiscarded) {
	/* nothing on the discard list; done */
	ReleaseWriteLock(&afs_xdcache);
	return;
    }

    /*
     * Get an entry from the list of discarded cache elements
     */
    tdc = afs_GetDSlot(afs_discardDCList, 0);
    osi_Assert(tdc->refCount == 1);
    ReleaseReadLock(&tdc->tlock);

    /* unlink from the discard list and give back its block accounting */
    afs_discardDCList = afs_dvnextTbl[tdc->index];
    afs_dvnextTbl[tdc->index] = NULLIDX;
    afs_discardDCCount--;
    size = ((tdc->f.chunkBytes + afs_fsfragsize) ^ afs_fsfragsize) >> 10;	/* round up */
    afs_blocksDiscarded -= size;
    afs_stats_cmperf.cacheBlocksDiscarded = afs_blocksDiscarded;
    /* We can lock because we just took it off the free list */
    ObtainWriteLock(&tdc->lock, 626);
    /* drop afs_xdcache across the (slow) truncate below */
    ReleaseWriteLock(&afs_xdcache);

    /*
     * Truncate the element to reclaim its space
     */
    tfile = afs_CFileOpen(&tdc->f.inode);
    afs_CFileTruncate(tfile, 0);
    afs_CFileClose(tfile);
    afs_AdjustSize(tdc, 0);
    afs_DCMoveBucket(tdc, 0, 0);

    /*
     * Free the element we just truncated
     */
    ObtainWriteLock(&afs_xdcache, 511);
    afs_indexFlags[tdc->index] &= ~IFDiscarded;
    afs_FreeDCache(tdc);
    /* clear volume-type state bits now that the chunk is empty */
    tdc->f.states &= ~(DRO|DBackup|DRW);
    ReleaseWriteLock(&tdc->lock);
    afs_PutDCache(tdc);
    ReleaseWriteLock(&afs_xdcache);
}
1115
1116/*!
1117 * Free as many entries from the list of discarded cache elements
1118 * as we need to get the free space down below CM_WAITFORDRAINPCT (98%).
1119 *
1120 * \return 0
1121 */
int
afs_MaybeFreeDiscardedDCache(void)
{

    AFS_STATCNT(afs_MaybeFreeDiscardedDCache);

    /* keep truncating discarded entries until either the discard list is
     * empty or usage has dropped below the wait-for-drain threshold */
    while (afs_blocksDiscarded
	   && (afs_blocksUsed >
	       PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks))) {
	afs_FreeDiscardedDCache();
    }
    return 0;
}
1135
1136/*!
1137 * Try to free up a certain number of disk slots.
1138 *
1139 * \param anumber Targeted number of disk slots to free up.
1140 *
1141 * \note Environment:
1142 * Must be called with afs_xdcache write-locked.
1143 *
1144 */
static void
afs_GetDownDSlot(int anumber)
{
    struct afs_q *tq, *nq;
    struct dcache *tdc;
    int ix;
    unsigned int cnt;

    AFS_STATCNT(afs_GetDownDSlot);
    if (cacheDiskType == AFS_FCACHE_TYPE_MEM)
	osi_Panic("diskless getdowndslot");

    /* caller must hold afs_xdcache write-locked */
    if (CheckLock(&afs_xdcache) != -1)
	osi_Panic("getdowndslot nolock");

    /* decrement anumber first for all dudes in free list */
    for (tdc = afs_freeDSList; tdc; tdc = (struct dcache *)tdc->lruq.next)
	anumber--;
    if (anumber <= 0)
	return;			/* enough already free */

    /* walk the DLRU from the least-recently-used end, reclaiming
     * unreferenced slots until we have freed enough */
    for (cnt = 0, tq = afs_DLRU.prev; tq != &afs_DLRU && anumber > 0;
	 tq = nq, cnt++) {
	tdc = (struct dcache *)tq;	/* q is first elt in dcache entry */
	nq = QPrev(tq);		/* in case we remove it */
	if (tdc->refCount == 0) {
	    if ((ix = tdc->index) == NULLIDX)
		osi_Panic("getdowndslot");
	    /* pull the entry out of the lruq and put it on the free list */
	    QRemove(&tdc->lruq);

	    /* write-through if modified */
	    if (tdc->dflags & DFEntryMod) {
#if defined(AFS_SGI_ENV) && defined(AFS_SGI_SHORTSTACK)
		/*
		 * ask proxy to do this for us - we don't have the stack space
		 */
		while (tdc->dflags & DFEntryMod) {
		    int s;
		    AFS_GUNLOCK();
		    s = SPLOCK(afs_sgibklock);
		    if (afs_sgibklist == NULL) {
			/* if slot is free, grab it. */
			afs_sgibklist = tdc;
			SV_SIGNAL(&afs_sgibksync);
		    }
		    /* wait for daemon to (start, then) finish. */
		    SP_WAIT(afs_sgibklock, s, &afs_sgibkwait, PINOD);
		    AFS_GLOCK();
		}
#else
		tdc->dflags &= ~DFEntryMod;
		afs_WriteDCache(tdc, 1);
#endif
	    }

	    /* finally put the entry in the free list */
	    afs_indexTable[ix] = NULL;
	    afs_indexFlags[ix] &= ~IFEverUsed;
	    tdc->index = NULLIDX;
	    /* free slots are chained through lruq.next */
	    tdc->lruq.next = (struct afs_q *)afs_freeDSList;
	    afs_freeDSList = tdc;
	    anumber--;
	}
    }
}				/*afs_GetDownDSlot */
1211
1212
1213/*
1214 * afs_RefDCache
1215 *
1216 * Description:
1217 * Increment the reference count on a disk cache entry,
1218 * which already has a non-zero refcount. In order to
1219 * increment the refcount of a zero-reference entry, you
1220 * have to hold afs_xdcache.
1221 *
1222 * Parameters:
1223 * adc : Pointer to the dcache entry to increment.
1224 *
1225 * Environment:
1226 * Nothing interesting.
1227 */
int
afs_RefDCache(struct dcache *adc)
{
    /* tlock serializes refCount updates */
    ObtainWriteLock(&adc->tlock, 627);
    if (adc->refCount < 0)
	osi_Panic("RefDCache: negative refcount");
    adc->refCount++;
    ReleaseWriteLock(&adc->tlock);
    return 0;
}
1238
1239
1240/*
1241 * afs_PutDCache
1242 *
1243 * Description:
1244 * Decrement the reference count on a disk cache entry.
1245 *
1246 * Parameters:
1247 * ad : Ptr to the dcache entry to decrement.
1248 *
1249 * Environment:
1250 * Nothing interesting.
1251 */
int
afs_PutDCache(struct dcache *adc)
{
    AFS_STATCNT(afs_PutDCache);
    /* tlock serializes refCount updates */
    ObtainWriteLock(&adc->tlock, 276);
    if (adc->refCount <= 0)
	osi_Panic("putdcache");
    --adc->refCount;
    ReleaseWriteLock(&adc->tlock);
    return 0;
}
1263
1264
1265/*
1266 * afs_TryToSmush
1267 *
1268 * Description:
1269 * Try to discard all data associated with this file from the
1270 * cache.
1271 *
1272 * Parameters:
1273 * avc : Pointer to the cache info for the file.
1274 *
1275 * Environment:
1276 * Both pvnLock and lock are write held.
1277 */
void
afs_TryToSmush(struct vcache *avc, afs_ucred_t *acred, int sync)
{
    struct dcache *tdc;
    int index;
    int i;
    AFS_STATCNT(afs_TryToSmush);
    afs_Trace2(afs_iclSetp, CM_TRACE_TRYTOSMUSH, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
    sync = 1;			/* XX Temp testing XX */

#if defined(AFS_SUN5_ENV)
    ObtainWriteLock(&avc->vlock, 573);
    avc->activeV++;		/* block new getpages */
    ReleaseWriteLock(&avc->vlock);
#endif

    /* Flush VM pages */
    osi_VM_TryToSmush(avc, acred, sync);

    /*
     * Get the hash chain containing all dce's for this fid
     */
    i = DVHash(&avc->f.fid);
    ObtainWriteLock(&afs_xdcache, 277);
    /* save the next chain index in 'i' BEFORE examining the entry,
     * because afs_FlushDCache below may unlink the entry from the chain */
    for (index = afs_dvhashTbl[i]; index != NULLIDX; index = i) {
	i = afs_dvnextTbl[index];	/* next pointer this hash table */
	if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
	    int releaseTlock = 1;
	    tdc = afs_GetDSlot(index, NULL);
	    if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
		if (sync) {
		    /* only flush clean, singly-referenced entries */
		    if ((afs_indexFlags[index] & IFDataMod) == 0
			&& tdc->refCount == 1) {
			ReleaseReadLock(&tdc->tlock);
			releaseTlock = 0;
			afs_FlushDCache(tdc);
		    }
		} else
		    afs_indexTable[index] = 0;
	    }
	    if (releaseTlock)
		ReleaseReadLock(&tdc->tlock);
	    afs_PutDCache(tdc);
	}
    }
#if defined(AFS_SUN5_ENV)
    ObtainWriteLock(&avc->vlock, 545);
    if (--avc->activeV == 0 && (avc->vstates & VRevokeWait)) {
	avc->vstates &= ~VRevokeWait;
	afs_osi_Wakeup((char *)&avc->vstates);
    }
    ReleaseWriteLock(&avc->vlock);
#endif
    ReleaseWriteLock(&afs_xdcache);
    /*
     * It's treated like a callback so that when we do lookups we'll
     * invalidate the unique bit if any
     * trytoSmush occured during the lookup call
     */
    afs_allCBs++;
}
1340
1341/*
1342 * afs_DCacheMissingChunks
1343 *
1344 * Description
1345 * Given the cached info for a file, return the number of chunks that
1346 * are not available from the dcache.
1347 *
1348 * Parameters:
1349 * avc: Pointer to the (held) vcache entry to look in.
1350 *
1351 * Returns:
1352 * The number of chunks which are not currently cached.
1353 *
1354 * Environment:
1355 * The vcache entry is held upon entry.
1356 */
1357
int
afs_DCacheMissingChunks(struct vcache *avc)
{
    int i, index;
    afs_size_t totalLength = 0;
    afs_uint32 totalChunks = 0;
    struct dcache *tdc;

    totalLength = avc->f.m.Length;
    /* a pending truncate caps the effective length */
    if (avc->f.truncPos < totalLength)
	totalLength = avc->f.truncPos;

    /* Length is 0, no chunk missing. */
    if (totalLength == 0)
	return 0;

    /* If totalLength is a multiple of chunksize, the last byte appears
     * as being part of the next chunk, which does not exist.
     * Decrementing totalLength by one fixes that.
     */
    totalLength--;
    totalChunks = (AFS_CHUNK(totalLength) + 1);

    /* If we're a directory, we only ever have one chunk, regardless of
     * the size of the dir.
     */
    if (avc->f.fid.Fid.Vnode & 1 || vType(avc) == VDIR)
	totalChunks = 1;

    /*
     printf("Should have %d chunks for %u bytes\n",
		totalChunks, (totalLength + 1));
    */
    i = DVHash(&avc->f.fid);
    ObtainWriteLock(&afs_xdcache, 1001);
    /* walk this fid's hash chain, subtracting one for each cached chunk;
     * 'i' holds the next index since it is fetched before any release */
    for (index = afs_dvhashTbl[i]; index != NULLIDX; index = i) {
        i = afs_dvnextTbl[index];
        if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
            tdc = afs_GetDSlot(index, NULL);
            if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
		totalChunks--;
            }
            ReleaseReadLock(&tdc->tlock);
            afs_PutDCache(tdc);
        }
    }
    ReleaseWriteLock(&afs_xdcache);

    /*printf("Missing %d chunks\n", totalChunks);*/

    return (totalChunks);
}
1410
1411/*
1412 * afs_FindDCache
1413 *
1414 * Description:
1415 * Given the cached info for a file and a byte offset into the
1416 * file, make sure the dcache entry for that file and containing
1417 * the given byte is available, returning it to our caller.
1418 *
1419 * Parameters:
1420 * avc : Pointer to the (held) vcache entry to look in.
1421 * abyte : Which byte we want to get to.
1422 *
1423 * Returns:
1424 * Pointer to the dcache entry covering the file & desired byte,
1425 * or NULL if not found.
1426 *
1427 * Environment:
1428 * The vcache entry is held upon entry.
1429 */
1430
struct dcache *
afs_FindDCache(struct vcache *avc, afs_size_t abyte)
{
    afs_int32 chunk;
    afs_int32 i, index;
    struct dcache *tdc = NULL;

    AFS_STATCNT(afs_FindDCache);
    chunk = AFS_CHUNK(abyte);

    /*
     * Hash on the [fid, chunk] and get the corresponding dcache index
     * after write-locking the dcache.
     */
    i = DCHash(&avc->f.fid, chunk);
    ObtainWriteLock(&afs_xdcache, 278);
    for (index = afs_dchashTbl[i]; index != NULLIDX;) {
	/* cheap unique-field check first, before paging in the slot */
	if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
	    tdc = afs_GetDSlot(index, NULL);
	    ReleaseReadLock(&tdc->tlock);
	    if (!FidCmp(&tdc->f.fid, &avc->f.fid) && chunk == tdc->f.chunk) {
		break;		/* leaving refCount high for caller */
	    }
	    afs_PutDCache(tdc);
	}
	index = afs_dcnextTbl[index];
    }
    if (index != NULLIDX) {
	/* found it: stamp it as recently used for LRU accounting */
	hset(afs_indexTimes[tdc->index], afs_indexCounter);
	hadd32(afs_indexCounter, 1);
	ReleaseWriteLock(&afs_xdcache);
	return tdc;
    }
    ReleaseWriteLock(&afs_xdcache);
    return NULL;
}				/*afs_FindDCache */
1467
1468
1469/*!
1470 * Get a fresh dcache from the free or discarded list.
1471 *
1472 * \param avc Who's dcache is this going to be?
1473 * \param chunk The position where it will be placed in.
1474 * \param lock How are locks held.
1475 * \param ashFid If this dcache going to be used for a shadow dir,
1476 * this is it's fid.
1477 *
1478 * \note Required locks:
1479 * - afs_xdcache (W)
1480 * - avc (R if (lock & 1) set and W otherwise)
1481 * \note It write locks the new dcache. The caller must unlock it.
1482 *
1483 * \return The new dcache.
1484 */
1485struct dcache *
1486afs_AllocDCache(struct vcache *avc, afs_int32 chunk, afs_int32 lock,
1487 struct VenusFid *ashFid)
1488{
1489 struct dcache *tdc = NULL((void *)0);
1490 afs_uint32 size = 0;
1491 struct osi_file *file;
1492
1493 if (afs_discardDCList == NULLIDX(-1)
1494 || ((lock & 2) && afs_freeDCList != NULLIDX(-1))) {
1495
1496 afs_indexFlags[afs_freeDCList] &= ~IFFree2;
1497 tdc = afs_GetDSlot(afs_freeDCList, 0)(*(afs_cacheType->GetDSlot))(afs_freeDCList, 0);
1498 osi_Assert(tdc->refCount == 1)(void)((tdc->refCount == 1) || (osi_AssertFailK( "tdc->refCount == 1"
, "/home/wollman/openafs/src/afs/afs_dcache.c", 1498), 0))
;
1499 ReleaseReadLock(&tdc->tlock)do { ; if (!(--((&tdc->tlock)->readers_reading)) &&
(&tdc->tlock)->wait_states) Afs_Lock_ReleaseW(&
tdc->tlock) ; if ( (&tdc->tlock)->pid_last_reader
== (((__curthread())->td_proc)->p_pid ) ) (&tdc->
tlock)->pid_last_reader =0; } while (0)
;
1500 ObtainWriteLock(&tdc->lock, 604)do { ; if (!(&tdc->lock)->excl_locked && !(
&tdc->lock)->readers_reading) (&tdc->lock) ->
excl_locked = 2; else Afs_Lock_Obtain(&tdc->lock, 2);
(&tdc->lock)->pid_writer = (((__curthread())->td_proc
)->p_pid ); (&tdc->lock)->src_indicator = 604; }
while (0)
;
1501 afs_freeDCList = afs_dvnextTbl[tdc->index];
1502 afs_freeDCCount--;
1503 } else {
1504 afs_indexFlags[afs_discardDCList] &= ~IFDiscarded64;
1505 tdc = afs_GetDSlot(afs_discardDCList, 0)(*(afs_cacheType->GetDSlot))(afs_discardDCList, 0);
1506 osi_Assert(tdc->refCount == 1)(void)((tdc->refCount == 1) || (osi_AssertFailK( "tdc->refCount == 1"
, "/home/wollman/openafs/src/afs/afs_dcache.c", 1506), 0))
;
1507 ReleaseReadLock(&tdc->tlock)do { ; if (!(--((&tdc->tlock)->readers_reading)) &&
(&tdc->tlock)->wait_states) Afs_Lock_ReleaseW(&
tdc->tlock) ; if ( (&tdc->tlock)->pid_last_reader
== (((__curthread())->td_proc)->p_pid ) ) (&tdc->
tlock)->pid_last_reader =0; } while (0)
;
1508 ObtainWriteLock(&tdc->lock, 605)do { ; if (!(&tdc->lock)->excl_locked && !(
&tdc->lock)->readers_reading) (&tdc->lock) ->
excl_locked = 2; else Afs_Lock_Obtain(&tdc->lock, 2);
(&tdc->lock)->pid_writer = (((__curthread())->td_proc
)->p_pid ); (&tdc->lock)->src_indicator = 605; }
while (0)
;
1509 afs_discardDCList = afs_dvnextTbl[tdc->index];
1510 afs_discardDCCount--;
1511 size =
1512 ((tdc->f.chunkBytes +
1513 afs_fsfragsize) ^ afs_fsfragsize) >> 10;
1514 tdc->f.states &= ~(DRO1|DBackup2|DRW4);
1515 afs_DCMoveBucket(tdc, size, 0);
1516 afs_blocksDiscarded -= size;
1517 afs_stats_cmperf.cacheBlocksDiscarded = afs_blocksDiscarded;
1518 if (lock & 2) {
1519 /* Truncate the chunk so zeroes get filled properly */
1520 file = afs_CFileOpen(&tdc->f.inode)(void *)(*(afs_cacheType->open))(&tdc->f.inode);
1521 afs_CFileTruncate(file, 0)(*(afs_cacheType->truncate))((file), 0);
1522 afs_CFileClose(file)(*(afs_cacheType->close))(file);
1523 afs_AdjustSize(tdc, 0);
1524 }
1525 }
1526
1527 /*
1528 * Locks held:
1529 * avc->lock(R) if setLocks
1530 * avc->lock(W) if !setLocks
1531 * tdc->lock(W)
1532 * afs_xdcache(W)
1533 */
1534
1535 /*
1536 * Fill in the newly-allocated dcache record.
1537 */
1538 afs_indexFlags[tdc->index] &= ~(IFDirtyPages16 | IFAnyPages32);
1539 if (ashFid)
1540 /* Use shadow fid if provided. */
1541 tdc->f.fid = *ashFid;
1542 else
1543 /* Use normal vcache's fid otherwise. */
1544 tdc->f.fid = avc->f.fid;
1545 if (avc->f.states & CRO0x00000004)
1546 tdc->f.states = DRO1;
1547 else if (avc->f.states & CBackup0x00000002)
1548 tdc->f.states = DBackup2;
1549 else
1550 tdc->f.states = DRW4;
1551 afs_DCMoveBucket(tdc, 0, afs_DCGetBucket(avc));
1552 afs_indexUnique[tdc->index] = tdc->f.fid.Fid.Unique;
1553 if (!ashFid)
1554 hones(tdc->f.versionNo)((tdc->f.versionNo).low = 0xffffffff, (tdc->f.versionNo
).high = 0xffffffff)
; /* invalid value */
1555 tdc->f.chunk = chunk;
1556 tdc->validPos = AFS_CHUNKTOBASE(chunk)((chunk == 0) ? 0 : ((afs_size_t) afs_FirstCSize + ((afs_size_t
) (chunk - 1) << afs_LogChunk)))
;
1557 /* XXX */
1558 if (tdc->lruq.prev == &tdc->lruq)
1559 osi_Panic("lruq 1");
1560
1561 return tdc;
1562}
1563
1564/*
1565 * afs_GetDCache
1566 *
1567 * Description:
1568 * This function is called to obtain a reference to data stored in
1569 * the disk cache, locating a chunk of data containing the desired
1570 * byte and returning a reference to the disk cache entry, with its
1571 * reference count incremented.
1572 *
1573 * Parameters:
1574 * IN:
1575 * avc : Ptr to a vcache entry (unlocked)
1576 * abyte : Byte position in the file desired
1577 * areq : Request structure identifying the requesting user.
1578 * aflags : Settings as follows:
1579 * 1 : Set locks
1580 * 2 : Return after creating entry.
1581 * 4 : called from afs_vnop_write.c
1582 * *alen contains length of data to be written.
1583 * OUT:
1584 * aoffset : Set to the offset within the chunk where the resident
1585 * byte is located.
1586 * alen : Set to the number of bytes of data after the desired
1587 * byte (including the byte itself) which can be read
1588 * from this chunk.
1589 *
1590 * Environment:
1591 * The vcache entry pointed to by avc is unlocked upon entry.
1592 */
1593
1594/*
1595 * Update the vnode-to-dcache hint if we can get the vnode lock
1596 * right away. Assumes dcache entry is at least read-locked.
1597 */
1598void
1599updateV2DC(int lockVc, struct vcache *v, struct dcache *d, int src)
1600{
1601 if (!lockVc || 0 == NBObtainWriteLock(&v->lock, src)(((&v->lock)->excl_locked || (&v->lock)->
readers_reading) ? 35 : (((&v->lock) -> excl_locked
= 2), ((&v->lock)->pid_writer = (((__curthread())->
td_proc)->p_pid )), ((&v->lock)->src_indicator =
src), 0))
) {
1602 if (hsame(v->f.m.DataVersion, d->f.versionNo)((v->f.m.DataVersion).low == (d->f.versionNo).low &&
(v->f.m.DataVersion).high == (d->f.versionNo).high)
&& v->callback)
1603 v->dchint = d;
1604 if (lockVc)
1605 ReleaseWriteLock(&v->lock)do { ; (&v->lock)->excl_locked &= ~2; if ((&
v->lock)->wait_states) Afs_Lock_ReleaseR(&v->lock
); (&v->lock)->pid_writer=0; } while (0)
;
1606 }
1607}
1608
1609/* avc - Write-locked unless aflags & 1 */
1610struct dcache *
1611afs_GetDCache(struct vcache *avc, afs_size_t abyte,
1612 struct vrequest *areq, afs_size_t * aoffset,
1613 afs_size_t * alen, int aflags)
1614{
1615 afs_int32 i, code, shortcut;
1616#if defined(AFS_AIX32_ENV) || defined(AFS_SGI_ENV)
1617 afs_int32 adjustsize = 0;
1618#endif
1619 int setLocks;
1620 afs_int32 index;
1621 afs_int32 us;
1622 afs_int32 chunk;
1623 afs_size_t maxGoodLength; /* amount of good data at server */
1624 afs_size_t Position = 0;
1625 afs_int32 size, tlen; /* size of segment to transfer */
1626 struct afs_FetchOutput *tsmall = 0;
1627 struct dcache *tdc;
1628 struct osi_file *file;
1629 struct afs_conn *tc;
1630 int downDCount = 0;
1631 struct server *newCallback = NULL((void *)0);
1632 char setNewCallback;
1633 char setVcacheStatus;
1634 char doVcacheUpdate;
1635 char slowPass = 0;
1636 int doAdjustSize = 0;
1637 int doReallyAdjustSize = 0;
1638 int overWriteWholeChunk = 0;
1639 struct rx_connection *rxconn;
1640
1641#ifndef AFS_NOSTATS
1642 struct afs_stats_AccessInfo *accP; /*Ptr to access record in stats */
1643 int fromReplica; /*Are we reading from a replica? */
1644 int numFetchLoops; /*# times around the fetch/analyze loop */
1645#endif /* AFS_NOSTATS */
1646
1647 AFS_STATCNT(afs_GetDCache)((afs_cmstats.callInfo.C_afs_GetDCache)++);
1648 if (dcacheDisabled)
1649 return NULL((void *)0);
1650
1651 setLocks = aflags & 1;
1652
1653 /*
1654 * Determine the chunk number and offset within the chunk corresponding
1655 * to the desired byte.
1656 */
1657 if (avc->f.fid.Fid.Vnode & 1) { /* if (vType(avc) == VDIR) */
1658 chunk = 0;
1659 } else {
1660 chunk = AFS_CHUNK(abyte)((abyte < afs_FirstCSize) ? 0 : (((abyte - afs_FirstCSize)
>> afs_LogChunk) + 1))
;
1661 }
1662
1663 /* come back to here if we waited for the cache to drain. */
1664 RetryGetDCache:
1665
1666 setNewCallback = setVcacheStatus = 0;
1667
1668 if (setLocks) {
1669 if (slowPass)
1670 ObtainWriteLock(&avc->lock, 616)do { ; if (!(&avc->lock)->excl_locked && !(
&avc->lock)->readers_reading) (&avc->lock) ->
excl_locked = 2; else Afs_Lock_Obtain(&avc->lock, 2);
(&avc->lock)->pid_writer = (((__curthread())->td_proc
)->p_pid ); (&avc->lock)->src_indicator = 616; }
while (0)
;
1671 else
1672 ObtainReadLock(&avc->lock)do { ; if (!((&avc->lock)->excl_locked & 2)) ((
&avc->lock)->readers_reading)++; else Afs_Lock_Obtain
(&avc->lock, 1); (&avc->lock)->pid_last_reader
= (((__curthread())->td_proc)->p_pid ); } while (0)
;
1673 }
1674
1675 /*
1676 * Locks held:
1677 * avc->lock(R) if setLocks && !slowPass
1678 * avc->lock(W) if !setLocks || slowPass
1679 */
1680
1681 shortcut = 0;
1682
1683 /* check hints first! (might could use bcmp or some such...) */
1684 if ((tdc = avc->dchint)) {
1685 int dcLocked;
1686
1687 /*
1688 * The locking order between afs_xdcache and dcache lock matters.
1689 * The hint dcache entry could be anywhere, even on the free list.
1690 * Locking afs_xdcache ensures that noone is trying to pull dcache
1691 * entries from the free list, and thereby assuming them to be not
1692 * referenced and not locked.
1693 */
1694 ObtainReadLock(&afs_xdcache)do { ; if (!((&afs_xdcache)->excl_locked & 2)) ((&
afs_xdcache)->readers_reading)++; else Afs_Lock_Obtain(&
afs_xdcache, 1); (&afs_xdcache)->pid_last_reader = (((
__curthread())->td_proc)->p_pid ); } while (0)
;
1695 dcLocked = (0 == NBObtainSharedLock(&tdc->lock, 601)(((&tdc->lock)->excl_locked) ? 35 : (((&tdc->
lock) -> excl_locked = 4), ((&tdc->lock)->pid_writer
= (((__curthread())->td_proc)->p_pid )), ((&tdc->
lock)->src_indicator = 601), 0))
);
1696
1697 if (dcLocked && (tdc->index != NULLIDX(-1))
1698 && !FidCmp(&tdc->f.fid, &avc->f.fid)((&tdc->f.fid)->Fid.Unique != (&avc->f.fid)->
Fid.Unique || (&tdc->f.fid)->Fid.Vnode != (&avc
->f.fid)->Fid.Vnode || (&tdc->f.fid)->Fid.Volume
!= (&avc->f.fid)->Fid.Volume || (&tdc->f.fid
)->Cell != (&avc->f.fid)->Cell)
&& chunk == tdc->f.chunk
1699 && !(afs_indexFlags[tdc->index] & (IFFree2 | IFDiscarded64))) {
1700 /* got the right one. It might not be the right version, and it
1701 * might be fetching, but it's the right dcache entry.
1702 */
1703 /* All this code should be integrated better with what follows:
1704 * I can save a good bit more time under a write lock if I do..
1705 */
1706 ObtainWriteLock(&tdc->tlock, 603)do { ; if (!(&tdc->tlock)->excl_locked && !
(&tdc->tlock)->readers_reading) (&tdc->tlock
) -> excl_locked = 2; else Afs_Lock_Obtain(&tdc->tlock
, 2); (&tdc->tlock)->pid_writer = (((__curthread())
->td_proc)->p_pid ); (&tdc->tlock)->src_indicator
= 603; } while (0)
;
1707 tdc->refCount++;
1708 ReleaseWriteLock(&tdc->tlock)do { ; (&tdc->tlock)->excl_locked &= ~2; if ((&
tdc->tlock)->wait_states) Afs_Lock_ReleaseR(&tdc->
tlock); (&tdc->tlock)->pid_writer=0; } while (0)
;
1709
1710 ReleaseReadLock(&afs_xdcache)do { ; if (!(--((&afs_xdcache)->readers_reading)) &&
(&afs_xdcache)->wait_states) Afs_Lock_ReleaseW(&afs_xdcache
) ; if ( (&afs_xdcache)->pid_last_reader == (((__curthread
())->td_proc)->p_pid ) ) (&afs_xdcache)->pid_last_reader
=0; } while (0)
;
1711 shortcut = 1;
1712
1713 if (hsame(tdc->f.versionNo, avc->f.m.DataVersion)((tdc->f.versionNo).low == (avc->f.m.DataVersion).low &&
(tdc->f.versionNo).high == (avc->f.m.DataVersion).high
)
1714 && !(tdc->dflags & DFFetching0x04)) {
1715
1716 afs_stats_cmperf.dcacheHits++;
1717 ObtainWriteLock(&afs_xdcache, 559)do { ; if (!(&afs_xdcache)->excl_locked && !(&
afs_xdcache)->readers_reading) (&afs_xdcache) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xdcache, 2); (&afs_xdcache
)->pid_writer = (((__curthread())->td_proc)->p_pid )
; (&afs_xdcache)->src_indicator = 559; } while (0)
;
1718 QRemove(&tdc->lruq)((&tdc->lruq)->next->prev = (&tdc->lruq)->
prev, (&tdc->lruq)->prev->next = (&tdc->lruq
)->next, (&tdc->lruq)->prev = ((void *)0), (&
tdc->lruq)->next = ((void *)0))
;
1719 QAdd(&afs_DLRU, &tdc->lruq)((&tdc->lruq)->next = (&afs_DLRU)->next, (&
tdc->lruq)->prev = (&afs_DLRU), (&afs_DLRU)->
next->prev = (&tdc->lruq), (&afs_DLRU)->next
= (&tdc->lruq))
;
1720 ReleaseWriteLock(&afs_xdcache)do { ; (&afs_xdcache)->excl_locked &= ~2; if ((&
afs_xdcache)->wait_states) Afs_Lock_ReleaseR(&afs_xdcache
); (&afs_xdcache)->pid_writer=0; } while (0)
;
1721
1722 /* Locks held:
1723 * avc->lock(R) if setLocks && !slowPass
1724 * avc->lock(W) if !setLocks || slowPass
1725 * tdc->lock(S)
1726 */
1727 goto done;
1728 }
1729 } else {
1730 if (dcLocked)
1731 ReleaseSharedLock(&tdc->lock)do { ; (&tdc->lock)->excl_locked &= ~(4 | 2); if
((&tdc->lock)->wait_states) Afs_Lock_ReleaseR(&
tdc->lock); (&tdc->lock)->pid_writer=0; } while (
0)
;
1732 ReleaseReadLock(&afs_xdcache)do { ; if (!(--((&afs_xdcache)->readers_reading)) &&
(&afs_xdcache)->wait_states) Afs_Lock_ReleaseW(&afs_xdcache
) ; if ( (&afs_xdcache)->pid_last_reader == (((__curthread
())->td_proc)->p_pid ) ) (&afs_xdcache)->pid_last_reader
=0; } while (0)
;
1733 }
1734
1735 if (!shortcut)
1736 tdc = 0;
1737 }
1738
1739 /* Locks held:
1740 * avc->lock(R) if setLocks && !slowPass
1741 * avc->lock(W) if !setLocks || slowPass
1742 * tdc->lock(S) if tdc
1743 */
1744
1745 if (!tdc) { /* If the hint wasn't the right dcache entry */
1746 /*
1747 * Hash on the [fid, chunk] and get the corresponding dcache index
1748 * after write-locking the dcache.
1749 */
1750 RetryLookup:
1751
1752 /* Locks held:
1753 * avc->lock(R) if setLocks && !slowPass
1754 * avc->lock(W) if !setLocks || slowPass
1755 */
1756
1757 i = DCHash(&avc->f.fid, chunk)((((&avc->f.fid)->Fid.Vnode + (&avc->f.fid)->
Fid.Volume + (chunk))) & (afs_dhashsize-1))
;
1758 /* check to make sure our space is fine */
1759 afs_MaybeWakeupTruncateDaemon();
1760
1761 ObtainWriteLock(&afs_xdcache, 280)do { ; if (!(&afs_xdcache)->excl_locked && !(&
afs_xdcache)->readers_reading) (&afs_xdcache) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xdcache, 2); (&afs_xdcache
)->pid_writer = (((__curthread())->td_proc)->p_pid )
; (&afs_xdcache)->src_indicator = 280; } while (0)
;
1762 us = NULLIDX(-1);
1763 for (index = afs_dchashTbl[i]; index != NULLIDX(-1);) {
1764 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
1765 tdc = afs_GetDSlot(index, NULL)(*(afs_cacheType->GetDSlot))(index, ((void *)0));
1766 ReleaseReadLock(&tdc->tlock)do { ; if (!(--((&tdc->tlock)->readers_reading)) &&
(&tdc->tlock)->wait_states) Afs_Lock_ReleaseW(&
tdc->tlock) ; if ( (&tdc->tlock)->pid_last_reader
== (((__curthread())->td_proc)->p_pid ) ) (&tdc->
tlock)->pid_last_reader =0; } while (0)
;
1767 /*
1768 * Locks held:
1769 * avc->lock(R) if setLocks && !slowPass
1770 * avc->lock(W) if !setLocks || slowPass
1771 * afs_xdcache(W)
1772 */
1773 if (!FidCmp(&tdc->f.fid, &avc->f.fid)((&tdc->f.fid)->Fid.Unique != (&avc->f.fid)->
Fid.Unique || (&tdc->f.fid)->Fid.Vnode != (&avc
->f.fid)->Fid.Vnode || (&tdc->f.fid)->Fid.Volume
!= (&avc->f.fid)->Fid.Volume || (&tdc->f.fid
)->Cell != (&avc->f.fid)->Cell)
&& chunk == tdc->f.chunk) {
1774 /* Move it up in the beginning of the list */
1775 if (afs_dchashTbl[i] != index) {
1776 afs_dcnextTbl[us] = afs_dcnextTbl[index];
1777 afs_dcnextTbl[index] = afs_dchashTbl[i];
1778 afs_dchashTbl[i] = index;
1779 }
1780 ReleaseWriteLock(&afs_xdcache)do { ; (&afs_xdcache)->excl_locked &= ~2; if ((&
afs_xdcache)->wait_states) Afs_Lock_ReleaseR(&afs_xdcache
); (&afs_xdcache)->pid_writer=0; } while (0)
;
1781 ObtainSharedLock(&tdc->lock, 606)do { ; if (!(&tdc->lock)->excl_locked) (&tdc->
lock) -> excl_locked = 4; else Afs_Lock_Obtain(&tdc->
lock, 4); (&tdc->lock)->pid_writer = (((__curthread
())->td_proc)->p_pid ); (&tdc->lock)->src_indicator
= 606; } while (0)
;
1782 break; /* leaving refCount high for caller */
1783 }
1784 afs_PutDCache(tdc);
1785 tdc = 0;
1786 }
1787 us = index;
1788 index = afs_dcnextTbl[index];
1789 }
1790
1791 /*
1792 * If we didn't find the entry, we'll create one.
1793 */
1794 if (index == NULLIDX(-1)) {
1795 /*
1796 * Locks held:
1797 * avc->lock(R) if setLocks
1798 * avc->lock(W) if !setLocks
1799 * afs_xdcache(W)
1800 */
1801 afs_Trace2(afs_iclSetp, CM_TRACE_GETDCACHE1, ICL_TYPE_POINTER,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event2(afs_iclSetp, (701087789L), (1<<24)+((2)
<<18)+((7)<<12), (long)(avc), (long)(chunk)) : 0)
1802 avc, ICL_TYPE_INT32, chunk)(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event2(afs_iclSetp, (701087789L), (1<<24)+((2)
<<18)+((7)<<12), (long)(avc), (long)(chunk)) : 0)
;
1803
1804 /* Make sure there is a free dcache entry for us to use */
1805 if (afs_discardDCList == NULLIDX(-1) && afs_freeDCList == NULLIDX(-1)) {
1806 while (1) {
1807 if (!setLocks)
1808 avc->f.states |= CDCLock0x02000000;
1809 /* just need slots */
1810 afs_GetDownD(5, (int *)0, afs_DCGetBucket(avc));
1811 if (!setLocks)
1812 avc->f.states &= ~CDCLock0x02000000;
1813 if (afs_discardDCList != NULLIDX(-1)
1814 || afs_freeDCList != NULLIDX(-1))
1815 break;
1816 /* If we can't get space for 5 mins we give up and panic */
1817 if (++downDCount > 300) {
1818 osi_Panic("getdcache");
1819 }
1820 ReleaseWriteLock(&afs_xdcache)do { ; (&afs_xdcache)->excl_locked &= ~2; if ((&
afs_xdcache)->wait_states) Afs_Lock_ReleaseR(&afs_xdcache
); (&afs_xdcache)->pid_writer=0; } while (0)
;
1821 /*
1822 * Locks held:
1823 * avc->lock(R) if setLocks
1824 * avc->lock(W) if !setLocks
1825 */
1826 afs_osi_Wait(1000, 0, 0);
1827 goto RetryLookup;
1828 }
1829 }
1830
1831 tdc = afs_AllocDCache(avc, chunk, aflags, NULL((void *)0));
1832
1833 /*
1834 * Now add to the two hash chains - note that i is still set
1835 * from the above DCHash call.
1836 */
1837 afs_dcnextTbl[tdc->index] = afs_dchashTbl[i];
1838 afs_dchashTbl[i] = tdc->index;
1839 i = DVHash(&avc->f.fid)((((&avc->f.fid)->Fid.Vnode + (&avc->f.fid)->
Fid.Volume )) & (afs_dhashsize-1))
;
1840 afs_dvnextTbl[tdc->index] = afs_dvhashTbl[i];
1841 afs_dvhashTbl[i] = tdc->index;
1842 tdc->dflags = DFEntryMod0x02;
1843 tdc->mflags = 0;
1844 afs_MaybeWakeupTruncateDaemon();
1845 ReleaseWriteLock(&afs_xdcache)do { ; (&afs_xdcache)->excl_locked &= ~2; if ((&
afs_xdcache)->wait_states) Afs_Lock_ReleaseR(&afs_xdcache
); (&afs_xdcache)->pid_writer=0; } while (0)
;
1846 ConvertWToSLock(&tdc->lock)do { ; (&tdc->lock)->excl_locked = 4; if((&tdc->
lock)->wait_states) Afs_Lock_ReleaseR(&tdc->lock); }
while (0)
;
1847 }
1848 }
1849
1850
1851 /* vcache->dcache hint failed */
1852 /*
1853 * Locks held:
1854 * avc->lock(R) if setLocks && !slowPass
1855 * avc->lock(W) if !setLocks || slowPass
1856 * tdc->lock(S)
1857 */
1858 afs_Trace4(afs_iclSetp, CM_TRACE_GETDCACHE2, ICL_TYPE_POINTER, avc,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087790L), (1<<24)+((2)
<<18)+((2)<<12)+((7)<<6)+(7), (long)(avc), (
long)(tdc), (long)(((tdc->f.versionNo).low)), (long)(((avc
->f.m.DataVersion).low))) : 0)
1859 ICL_TYPE_POINTER, tdc, ICL_TYPE_INT32,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087790L), (1<<24)+((2)
<<18)+((2)<<12)+((7)<<6)+(7), (long)(avc), (
long)(tdc), (long)(((tdc->f.versionNo).low)), (long)(((avc
->f.m.DataVersion).low))) : 0)
1860 hgetlo(tdc->f.versionNo), ICL_TYPE_INT32,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087790L), (1<<24)+((2)
<<18)+((2)<<12)+((7)<<6)+(7), (long)(avc), (
long)(tdc), (long)(((tdc->f.versionNo).low)), (long)(((avc
->f.m.DataVersion).low))) : 0)
1861 hgetlo(avc->f.m.DataVersion))(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087790L), (1<<24)+((2)
<<18)+((2)<<12)+((7)<<6)+(7), (long)(avc), (
long)(tdc), (long)(((tdc->f.versionNo).low)), (long)(((avc
->f.m.DataVersion).low))) : 0)
;
1862 /*
1863 * Here we have the entry in tdc, with its refCount incremented.
1864 * Note: we don't use the S-lock on avc; it costs concurrency when
1865 * storing a file back to the server.
1866 */
1867
1868 /*
1869 * Not a newly created file so we need to check the file's length and
1870 * compare data versions since someone could have changed the data or we're
1871 * reading a file written elsewhere. We only want to bypass doing no-op
1872 * read rpcs on newly created files (dv of 0) since only then we guarantee
1873 * that this chunk's data hasn't been filled by another client.
1874 */
1875 size = AFS_CHUNKSIZE(abyte)((abyte < afs_FirstCSize) ? afs_FirstCSize : afs_OtherCSize
)
;
1876 if (aflags & 4) /* called from write */
1877 tlen = *alen;
1878 else /* called from read */
1879 tlen = tdc->validPos - abyte;
1880 Position = AFS_CHUNKTOBASE(chunk)((chunk == 0) ? 0 : ((afs_size_t) afs_FirstCSize + ((afs_size_t
) (chunk - 1) << afs_LogChunk)))
;
1881 afs_Trace4(afs_iclSetp, CM_TRACE_GETDCACHE3, ICL_TYPE_INT32, tlen,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087791L), (1<<24)+((7)
<<18)+((7)<<12)+((8)<<6)+(8), (long)(tlen),
(long)(aflags), (long)((&abyte)), (long)((&Position)
)) : 0)
1882 ICL_TYPE_INT32, aflags, ICL_TYPE_OFFSET,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087791L), (1<<24)+((7)
<<18)+((7)<<12)+((8)<<6)+(8), (long)(tlen),
(long)(aflags), (long)((&abyte)), (long)((&Position)
)) : 0)
1883 ICL_HANDLE_OFFSET(abyte), ICL_TYPE_OFFSET,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087791L), (1<<24)+((7)
<<18)+((7)<<12)+((8)<<6)+(8), (long)(tlen),
(long)(aflags), (long)((&abyte)), (long)((&Position)
)) : 0)
1884 ICL_HANDLE_OFFSET(Position))(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087791L), (1<<24)+((7)
<<18)+((7)<<12)+((8)<<6)+(8), (long)(tlen),
(long)(aflags), (long)((&abyte)), (long)((&Position)
)) : 0)
;
1885 if ((aflags & 4) && (hiszero(avc->f.m.DataVersion)((avc->f.m.DataVersion).low == 0 && (avc->f.m.DataVersion
).high == 0)
))
1886 doAdjustSize = 1;
1887 if ((AFS_CHUNKTOBASE(chunk)((chunk == 0) ? 0 : ((afs_size_t) afs_FirstCSize + ((afs_size_t
) (chunk - 1) << afs_LogChunk)))
>= avc->f.m.Length) ||
1888 ((aflags & 4) && (abyte == Position) && (tlen >= size)))
1889 overWriteWholeChunk = 1;
1890 if (doAdjustSize || overWriteWholeChunk) {
1891#if defined(AFS_AIX32_ENV) || defined(AFS_SGI_ENV)
1892#ifdef AFS_SGI_ENV
1893#ifdef AFS_SGI64_ENV
1894 if (doAdjustSize)
1895 adjustsize = NBPP;
1896#else /* AFS_SGI64_ENV */
1897 if (doAdjustSize)
1898 adjustsize = 8192;
1899#endif /* AFS_SGI64_ENV */
1900#else /* AFS_SGI_ENV */
1901 if (doAdjustSize)
1902 adjustsize = 4096;
1903#endif /* AFS_SGI_ENV */
1904 if (AFS_CHUNKTOBASE(chunk)((chunk == 0) ? 0 : ((afs_size_t) afs_FirstCSize + ((afs_size_t
) (chunk - 1) << afs_LogChunk)))
+ adjustsize >= avc->f.m.Length &&
1905#else /* defined(AFS_AIX32_ENV) || defined(AFS_SGI_ENV) */
1906#if defined(AFS_SUN5_ENV)
1907 if ((doAdjustSize || (AFS_CHUNKTOBASE(chunk)((chunk == 0) ? 0 : ((afs_size_t) afs_FirstCSize + ((afs_size_t
) (chunk - 1) << afs_LogChunk)))
>= avc->f.m.Length)) &&
1908#else
1909 if (AFS_CHUNKTOBASE(chunk)((chunk == 0) ? 0 : ((afs_size_t) afs_FirstCSize + ((afs_size_t
) (chunk - 1) << afs_LogChunk)))
>= avc->f.m.Length &&
1910#endif
1911#endif /* defined(AFS_AIX32_ENV) || defined(AFS_SGI_ENV) */
1912 !hsame(avc->f.m.DataVersion, tdc->f.versionNo)((avc->f.m.DataVersion).low == (tdc->f.versionNo).low &&
(avc->f.m.DataVersion).high == (tdc->f.versionNo).high
)
)
1913 doReallyAdjustSize = 1;
1914
1915 if (doReallyAdjustSize || overWriteWholeChunk) {
1916 /* no data in file to read at this position */
1917 UpgradeSToWLock(&tdc->lock, 607)do { ; if (!(&tdc->lock)->readers_reading) (&tdc
->lock)->excl_locked = 2; else Afs_Lock_Obtain(&tdc
->lock, 6); (&tdc->lock)->pid_writer = (((__curthread
())->td_proc)->p_pid ); (&tdc->lock)->src_indicator
= 607; } while (0)
;
1918 file = afs_CFileOpen(&tdc->f.inode)(void *)(*(afs_cacheType->open))(&tdc->f.inode);
1919 afs_CFileTruncate(file, 0)(*(afs_cacheType->truncate))((file), 0);
1920 afs_CFileClose(file)(*(afs_cacheType->close))(file);
1921 afs_AdjustSize(tdc, 0);
1922 hset(tdc->f.versionNo, avc->f.m.DataVersion)((tdc->f.versionNo) = (avc->f.m.DataVersion));
1923 tdc->dflags |= DFEntryMod0x02;
1924
1925 ConvertWToSLock(&tdc->lock)do { ; (&tdc->lock)->excl_locked = 4; if((&tdc->
lock)->wait_states) Afs_Lock_ReleaseR(&tdc->lock); }
while (0)
;
1926 }
1927 }
1928
1929 /*
1930 * We must read in the whole chunk if the version number doesn't
1931 * match.
1932 */
1933 if (aflags & 2) {
1934 /* don't need data, just a unique dcache entry */
1935 ObtainWriteLock(&afs_xdcache, 608)do { ; if (!(&afs_xdcache)->excl_locked && !(&
afs_xdcache)->readers_reading) (&afs_xdcache) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xdcache, 2); (&afs_xdcache
)->pid_writer = (((__curthread())->td_proc)->p_pid )
; (&afs_xdcache)->src_indicator = 608; } while (0)
;
1936 hset(afs_indexTimes[tdc->index], afs_indexCounter)((afs_indexTimes[tdc->index]) = (afs_indexCounter));
1937 hadd32(afs_indexCounter, 1)((void)((((afs_indexCounter).low ^ (int)(1)) & 0x80000000
) ? (((((afs_indexCounter).low + (int)(1)) & 0x80000000) ==
0) && (afs_indexCounter).high++) : (((afs_indexCounter
).low & (int)(1) & 0x80000000) && (afs_indexCounter
).high++)), (afs_indexCounter).low += (int)(1))
;
1938 ReleaseWriteLock(&afs_xdcache)do { ; (&afs_xdcache)->excl_locked &= ~2; if ((&
afs_xdcache)->wait_states) Afs_Lock_ReleaseR(&afs_xdcache
); (&afs_xdcache)->pid_writer=0; } while (0)
;
1939
1940 updateV2DC(setLocks, avc, tdc, 553);
1941 if (vType(avc)((avc)->v)->v_type == VDIR)
1942 *aoffset = abyte;
1943 else
1944 *aoffset = AFS_CHUNKOFFSET(abyte)((abyte < afs_FirstCSize) ? abyte : ((abyte - afs_FirstCSize
) & (afs_OtherCSize - 1)))
;
1945 if (tdc->validPos < abyte)
1946 *alen = (afs_size_t) 0;
1947 else
1948 *alen = tdc->validPos - abyte;
1949 ReleaseSharedLock(&tdc->lock)do { ; (&tdc->lock)->excl_locked &= ~(4 | 2); if
((&tdc->lock)->wait_states) Afs_Lock_ReleaseR(&
tdc->lock); (&tdc->lock)->pid_writer=0; } while (
0)
;
1950 if (setLocks) {
1951 if (slowPass)
1952 ReleaseWriteLock(&avc->lock)do { ; (&avc->lock)->excl_locked &= ~2; if ((&
avc->lock)->wait_states) Afs_Lock_ReleaseR(&avc->
lock); (&avc->lock)->pid_writer=0; } while (0)
;
1953 else
1954 ReleaseReadLock(&avc->lock)do { ; if (!(--((&avc->lock)->readers_reading)) &&
(&avc->lock)->wait_states) Afs_Lock_ReleaseW(&
avc->lock) ; if ( (&avc->lock)->pid_last_reader ==
(((__curthread())->td_proc)->p_pid ) ) (&avc->lock
)->pid_last_reader =0; } while (0)
;
1955 }
1956 return tdc; /* check if we're done */
1957 }
1958
1959 /*
1960 * Locks held:
1961 * avc->lock(R) if setLocks && !slowPass
1962 * avc->lock(W) if !setLocks || slowPass
1963 * tdc->lock(S)
1964 */
1965 osi_Assert((setLocks && !slowPass) || WriteLocked(&avc->lock))(void)(((setLocks && !slowPass) || ((&avc->lock
)->excl_locked & 2)) || (osi_AssertFailK( "(setLocks && !slowPass) || WriteLocked(&avc->lock)"
, "/home/wollman/openafs/src/afs/afs_dcache.c", 1965), 0))
;
1966
1967 setNewCallback = setVcacheStatus = 0;
1968
1969 /*
1970 * Locks held:
1971 * avc->lock(R) if setLocks && !slowPass
1972 * avc->lock(W) if !setLocks || slowPass
1973 * tdc->lock(S)
1974 */
1975 if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo)((avc->f.m.DataVersion).low == (tdc->f.versionNo).low &&
(avc->f.m.DataVersion).high == (tdc->f.versionNo).high
)
&& !overWriteWholeChunk) {
1976 /*
1977 * Version number mismatch.
1978 */
1979 /*
1980 * If we are disconnected, then we can't do much of anything
1981 * because the data doesn't match the file.
1982 */
1983 if (AFS_IS_DISCONNECTED(afs_is_disconnected)) {
1984 ReleaseSharedLock(&tdc->lock)do { ; (&tdc->lock)->excl_locked &= ~(4 | 2); if
((&tdc->lock)->wait_states) Afs_Lock_ReleaseR(&
tdc->lock); (&tdc->lock)->pid_writer=0; } while (
0)
;
1985 if (setLocks) {
1986 if (slowPass)
1987 ReleaseWriteLock(&avc->lock)do { ; (&avc->lock)->excl_locked &= ~2; if ((&
avc->lock)->wait_states) Afs_Lock_ReleaseR(&avc->
lock); (&avc->lock)->pid_writer=0; } while (0)
;
1988 else
1989 ReleaseReadLock(&avc->lock)do { ; if (!(--((&avc->lock)->readers_reading)) &&
(&avc->lock)->wait_states) Afs_Lock_ReleaseW(&
avc->lock) ; if ( (&avc->lock)->pid_last_reader ==
(((__curthread())->td_proc)->p_pid ) ) (&avc->lock
)->pid_last_reader =0; } while (0)
;
1990 }
1991 /* Flush the Dcache */
1992 afs_PutDCache(tdc);
1993
1994 return NULL((void *)0);
1995 }
1996 UpgradeSToWLock(&tdc->lock, 609)do { ; if (!(&tdc->lock)->readers_reading) (&tdc
->lock)->excl_locked = 2; else Afs_Lock_Obtain(&tdc
->lock, 6); (&tdc->lock)->pid_writer = (((__curthread
())->td_proc)->p_pid ); (&tdc->lock)->src_indicator
= 609; } while (0)
;
1997
1998 /*
1999 * If data ever existed for this vnode, and this is a text object,
2000 * do some clearing. Now, you'd think you need only do the flush
2001 * when VTEXT is on, but VTEXT is turned off when the text object
2002 * is freed, while pages are left lying around in memory marked
2003 * with this vnode. If we would reactivate (create a new text
2004 * object from) this vnode, we could easily stumble upon some of
2005 * these old pages in pagein. So, we always flush these guys.
2006 * Sun has a wonderful lack of useful invariants in this system.
2007 *
2008 * avc->flushDV is the data version # of the file at the last text
2009 * flush. Clearly, at least, we don't have to flush the file more
2010 * often than it changes
2011 */
2012 if (hcmp(avc->flushDV, avc->f.m.DataVersion)((avc->flushDV).high<(avc->f.m.DataVersion).high? -1
: ((avc->flushDV).high > (avc->f.m.DataVersion).high
? 1 : ((avc->flushDV).low <(avc->f.m.DataVersion).low
? -1 : ((avc->flushDV).low > (avc->f.m.DataVersion).
low? 1 : 0))))
< 0) {
2013 /*
2014 * By here, the cache entry is always write-locked. We can
2015 * deadlock if we call osi_Flush with the cache entry locked...
2016 * Unlock the dcache too.
2017 */
2018 ReleaseWriteLock(&tdc->lock)do { ; (&tdc->lock)->excl_locked &= ~2; if ((&
tdc->lock)->wait_states) Afs_Lock_ReleaseR(&tdc->
lock); (&tdc->lock)->pid_writer=0; } while (0)
;
2019 if (setLocks && !slowPass)
2020 ReleaseReadLock(&avc->lock)do { ; if (!(--((&avc->lock)->readers_reading)) &&
(&avc->lock)->wait_states) Afs_Lock_ReleaseW(&
avc->lock) ; if ( (&avc->lock)->pid_last_reader ==
(((__curthread())->td_proc)->p_pid ) ) (&avc->lock
)->pid_last_reader =0; } while (0)
;
2021 else
2022 ReleaseWriteLock(&avc->lock)do { ; (&avc->lock)->excl_locked &= ~2; if ((&
avc->lock)->wait_states) Afs_Lock_ReleaseR(&avc->
lock); (&avc->lock)->pid_writer=0; } while (0)
;
2023
2024 osi_FlushText(avc);
2025 /*
2026 * Call osi_FlushPages in open, read/write, and map, since it
2027 * is too hard here to figure out if we should lock the
2028 * pvnLock.
2029 */
2030 if (setLocks && !slowPass)
2031 ObtainReadLock(&avc->lock)do { ; if (!((&avc->lock)->excl_locked & 2)) ((
&avc->lock)->readers_reading)++; else Afs_Lock_Obtain
(&avc->lock, 1); (&avc->lock)->pid_last_reader
= (((__curthread())->td_proc)->p_pid ); } while (0)
;
2032 else
2033 ObtainWriteLock(&avc->lock, 66)do { ; if (!(&avc->lock)->excl_locked && !(
&avc->lock)->readers_reading) (&avc->lock) ->
excl_locked = 2; else Afs_Lock_Obtain(&avc->lock, 2);
(&avc->lock)->pid_writer = (((__curthread())->td_proc
)->p_pid ); (&avc->lock)->src_indicator = 66; } while
(0)
;
2034 ObtainWriteLock(&tdc->lock, 610)do { ; if (!(&tdc->lock)->excl_locked && !(
&tdc->lock)->readers_reading) (&tdc->lock) ->
excl_locked = 2; else Afs_Lock_Obtain(&tdc->lock, 2);
(&tdc->lock)->pid_writer = (((__curthread())->td_proc
)->p_pid ); (&tdc->lock)->src_indicator = 610; }
while (0)
;
2035 }
2036
2037 /*
2038 * Locks held:
2039 * avc->lock(R) if setLocks && !slowPass
2040 * avc->lock(W) if !setLocks || slowPass
2041 * tdc->lock(W)
2042 */
2043
2044 /* Watch for standard race condition around osi_FlushText */
2045 if (hsame(avc->f.m.DataVersion, tdc->f.versionNo)((avc->f.m.DataVersion).low == (tdc->f.versionNo).low &&
(avc->f.m.DataVersion).high == (tdc->f.versionNo).high
)
) {
2046 updateV2DC(setLocks, avc, tdc, 569); /* set hint */
2047 afs_stats_cmperf.dcacheHits++;
2048 ConvertWToSLock(&tdc->lock)do { ; (&tdc->lock)->excl_locked = 4; if((&tdc->
lock)->wait_states) Afs_Lock_ReleaseR(&tdc->lock); }
while (0)
;
2049 goto done;
2050 }
2051
2052 /* Sleep here when cache needs to be drained. */
2053 if (setLocks && !slowPass
2054 && (afs_blocksUsed >
2055 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)((afs_cacheBlocks & 0xffe00000) ? ((afs_cacheBlocks) / 100
* (98)) : ((98) * (afs_cacheBlocks) / 100))
)) {
2056 /* Make sure truncate daemon is running */
2057 afs_MaybeWakeupTruncateDaemon();
2058 ObtainWriteLock(&tdc->tlock, 614)do { ; if (!(&tdc->tlock)->excl_locked && !
(&tdc->tlock)->readers_reading) (&tdc->tlock
) -> excl_locked = 2; else Afs_Lock_Obtain(&tdc->tlock
, 2); (&tdc->tlock)->pid_writer = (((__curthread())
->td_proc)->p_pid ); (&tdc->tlock)->src_indicator
= 614; } while (0)
;
2059 tdc->refCount--; /* we'll re-obtain the dcache when we re-try. */
2060 ReleaseWriteLock(&tdc->tlock)do { ; (&tdc->tlock)->excl_locked &= ~2; if ((&
tdc->tlock)->wait_states) Afs_Lock_ReleaseR(&tdc->
tlock); (&tdc->tlock)->pid_writer=0; } while (0)
;
2061 ReleaseWriteLock(&tdc->lock)do { ; (&tdc->lock)->excl_locked &= ~2; if ((&
tdc->lock)->wait_states) Afs_Lock_ReleaseR(&tdc->
lock); (&tdc->lock)->pid_writer=0; } while (0)
;
2062 ReleaseReadLock(&avc->lock)do { ; if (!(--((&avc->lock)->readers_reading)) &&
(&avc->lock)->wait_states) Afs_Lock_ReleaseW(&
avc->lock) ; if ( (&avc->lock)->pid_last_reader ==
(((__curthread())->td_proc)->p_pid ) ) (&avc->lock
)->pid_last_reader =0; } while (0)
;
2063 while ((afs_blocksUsed - afs_blocksDiscarded) >
2064 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)((afs_cacheBlocks & 0xffe00000) ? ((afs_cacheBlocks) / 100
* (98)) : ((98) * (afs_cacheBlocks) / 100))
) {
2065 afs_WaitForCacheDrain = 1;
2066 afs_osi_Sleep(&afs_WaitForCacheDrain);
2067 }
2068 afs_MaybeFreeDiscardedDCache();
2069 /* need to check if someone else got the chunk first. */
2070 goto RetryGetDCache;
2071 }
2072
2073 /* Do not fetch data beyond truncPos. */
2074 maxGoodLength = avc->f.m.Length;
2075 if (avc->f.truncPos < maxGoodLength)
2076 maxGoodLength = avc->f.truncPos;
2077 Position = AFS_CHUNKBASE(abyte)((abyte < afs_FirstCSize) ? 0 : (((abyte - afs_FirstCSize)
& ~(afs_OtherCSize - 1)) + afs_FirstCSize))
;
2078 if (vType(avc)((avc)->v)->v_type == VDIR) {
2079 size = avc->f.m.Length;
2080 if (size > tdc->f.chunkBytes) {
2081 /* pre-reserve space for file */
2082 afs_AdjustSize(tdc, size);
2083 }
2084 size = 999999999; /* max size for transfer */
2085 } else {
2086 size = AFS_CHUNKSIZE(abyte)((abyte < afs_FirstCSize) ? afs_FirstCSize : afs_OtherCSize
)
; /* expected max size */
2087 /* don't read past end of good data on server */
2088 if (Position + size > maxGoodLength)
2089 size = maxGoodLength - Position;
2090 if (size < 0)
2091 size = 0; /* Handle random races */
2092 if (size > tdc->f.chunkBytes) {
2093 /* pre-reserve space for file */
2094 afs_AdjustSize(tdc, size); /* changes chunkBytes */
2095 /* max size for transfer still in size */
2096 }
2097 }
2098 if (afs_mariner && !tdc->f.chunk)
2099 afs_MarinerLog("fetch$Fetching", avc); /* , Position, size, afs_indexCounter ); */
2100 /*
2101 * Right now, we only have one tool, and it's a hammer. So, we
2102 * fetch the whole file.
2103 */
2104 DZap(tdc); /* pages in cache may be old */
2105 file = afs_CFileOpen(&tdc->f.inode)(void *)(*(afs_cacheType->open))(&tdc->f.inode);
2106 afs_RemoveVCB(&avc->f.fid);
2107 tdc->f.states |= DWriting8;
2108 tdc->dflags |= DFFetching0x04;
2109 tdc->validPos = Position; /* which is AFS_CHUNKBASE(abyte) */
2110 if (tdc->mflags & DFFetchReq0x10) {
2111 tdc->mflags &= ~DFFetchReq0x10;
2112 if (afs_osi_Wakeup(&tdc->validPos) == 0)
2113 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAKE, ICL_TYPE_STRING,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087894L), (1<<24)+((4)
<<18)+((7)<<12)+((2)<<6)+(7), (long)("/home/wollman/openafs/src/afs/afs_dcache.c"
), (long)(2114), (long)(tdc), (long)(tdc->dflags)) : 0)
2114 __FILE__, ICL_TYPE_INT32, __LINE__,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087894L), (1<<24)+((4)
<<18)+((7)<<12)+((2)<<6)+(7), (long)("/home/wollman/openafs/src/afs/afs_dcache.c"
), (long)(2114), (long)(tdc), (long)(tdc->dflags)) : 0)
2115 ICL_TYPE_POINTER, tdc, ICL_TYPE_INT32,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087894L), (1<<24)+((4)
<<18)+((7)<<12)+((2)<<6)+(7), (long)("/home/wollman/openafs/src/afs/afs_dcache.c"
), (long)(2114), (long)(tdc), (long)(tdc->dflags)) : 0)
2116 tdc->dflags)(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087894L), (1<<24)+((4)
<<18)+((7)<<12)+((2)<<6)+(7), (long)("/home/wollman/openafs/src/afs/afs_dcache.c"
), (long)(2114), (long)(tdc), (long)(tdc->dflags)) : 0)
;
2117 }
2118 tsmall =
2119 (struct afs_FetchOutput *)osi_AllocLargeSpace(sizeof(struct afs_FetchOutput));
2120 setVcacheStatus = 0;
2121#ifndef AFS_NOSTATS
2122 /*
2123 * Remember if we are doing the reading from a replicated volume,
2124 * and how many times we've zipped around the fetch/analyze loop.
2125 */
2126 fromReplica = (avc->f.states & CRO0x00000004) ? 1 : 0;
2127 numFetchLoops = 0;
2128 accP = &(afs_stats_cmfullperf.accessinf);
2129 if (fromReplica)
2130 (accP->replicatedRefs)++;
2131 else
2132 (accP->unreplicatedRefs)++;
2133#endif /* AFS_NOSTATS */
2134 /* this is a cache miss */
2135 afs_Trace4(afs_iclSetp, CM_TRACE_FETCHPROC, ICL_TYPE_POINTER, avc,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087758L), (1<<24)+((2)
<<18)+((5)<<12)+((8)<<6)+(7), (long)(avc), (
long)(&(avc->f.fid)), (long)((&Position)), (long)(
size)) : 0)
2136 ICL_TYPE_FID, &(avc->f.fid), ICL_TYPE_OFFSET,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087758L), (1<<24)+((2)
<<18)+((5)<<12)+((8)<<6)+(7), (long)(avc), (
long)(&(avc->f.fid)), (long)((&Position)), (long)(
size)) : 0)
2137 ICL_HANDLE_OFFSET(Position), ICL_TYPE_INT32, size)(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087758L), (1<<24)+((2)
<<18)+((5)<<12)+((8)<<6)+(7), (long)(avc), (
long)(&(avc->f.fid)), (long)((&Position)), (long)(
size)) : 0)
;
2138
2139 if (size)
2140 afs_stats_cmperf.dcacheMisses++;
2141 code = 0;
2142 /*
2143 * Dynamic root support: fetch data from local memory.
2144 */
2145 if (afs_IsDynroot(avc)) {
2146 char *dynrootDir;
2147 int dynrootLen;
2148
2149 afs_GetDynroot(&dynrootDir, &dynrootLen, &tsmall->OutStatus);
2150
2151 dynrootDir += Position;
2152 dynrootLen -= Position;
2153 if (size > dynrootLen)
2154 size = dynrootLen;
2155 if (size < 0)
2156 size = 0;
2157 code = afs_CFileWrite(file, 0, dynrootDir, size)(*(afs_cacheType->fwrite))(file, 0, dynrootDir, size);
2158 afs_PutDynroot();
2159
2160 if (code == size)
2161 code = 0;
2162 else
2163 code = -1;
2164
2165 tdc->validPos = Position + size;
2166 afs_CFileTruncate(file, size)(*(afs_cacheType->truncate))((file), size); /* prune it */
2167 } else if (afs_IsDynrootMount(avc)) {
2168 char *dynrootDir;
2169 int dynrootLen;
2170
2171 afs_GetDynrootMount(&dynrootDir, &dynrootLen, &tsmall->OutStatus);
2172
2173 dynrootDir += Position;
2174 dynrootLen -= Position;
2175 if (size > dynrootLen)
2176 size = dynrootLen;
2177 if (size < 0)
2178 size = 0;
2179 code = afs_CFileWrite(file, 0, dynrootDir, size)(*(afs_cacheType->fwrite))(file, 0, dynrootDir, size);
2180 afs_PutDynroot();
2181
2182 if (code == size)
2183 code = 0;
2184 else
2185 code = -1;
2186
2187 tdc->validPos = Position + size;
2188 afs_CFileTruncate(file, size)(*(afs_cacheType->truncate))((file), size); /* prune it */
2189 } else
2190 /*
2191 * Not a dynamic vnode: do the real fetch.
2192 */
2193 do {
2194 /*
2195 * Locks held:
2196 * avc->lock(R) if setLocks && !slowPass
2197 * avc->lock(W) if !setLocks || slowPass
2198 * tdc->lock(W)
2199 */
2200
2201 tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK4, &rxconn);
2202 if (tc) {
2203#ifndef AFS_NOSTATS
2204 numFetchLoops++;
2205 if (fromReplica)
2206 (accP->numReplicasAccessed)++;
2207
2208#endif /* AFS_NOSTATS */
2209 if (!setLocks || slowPass) {
2210 avc->callback = tc->parent->srvr->server;
2211 } else {
2212 newCallback = tc->parent->srvr->server;
2213 setNewCallback = 1;
2214 }
2215 i = osi_Time()time_second;
Value stored to 'i' is never read
2216 code = afs_CacheFetchProc(tc, rxconn, file, Position, tdc,
2217 avc, size, tsmall);
2218 } else
2219 code = -1;
2220
2221 if (code == 0) {
2222 /* callback could have been broken (or expired) in a race here,
2223 * but we return the data anyway. It's as good as we knew about
2224 * when we started. */
2225 /*
2226 * validPos is updated by CacheFetchProc, and can only be
2227 * modifed under a dcache write lock, which we've blocked out
2228 */
2229 size = tdc->validPos - Position; /* actual segment size */
2230 if (size < 0)
2231 size = 0;
2232 afs_CFileTruncate(file, size)(*(afs_cacheType->truncate))((file), size); /* prune it */
2233 } else {
2234 if (!setLocks || slowPass) {
2235 ObtainWriteLock(&afs_xcbhash, 453)do { ; if (!(&afs_xcbhash)->excl_locked && !(&
afs_xcbhash)->readers_reading) (&afs_xcbhash) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xcbhash, 2); (&afs_xcbhash
)->pid_writer = (((__curthread())->td_proc)->p_pid )
; (&afs_xcbhash)->src_indicator = 453; } while (0)
;
2236 afs_DequeueCallback(avc);
2237 avc->f.states &= ~(CStatd0x00000001 | CUnique0x00001000);
2238 avc->callback = NULL((void *)0);
2239 ReleaseWriteLock(&afs_xcbhash)do { ; (&afs_xcbhash)->excl_locked &= ~2; if ((&
afs_xcbhash)->wait_states) Afs_Lock_ReleaseR(&afs_xcbhash
); (&afs_xcbhash)->pid_writer=0; } while (0)
;
2240 if (avc->f.fid.Fid.Vnode & 1 || (vType(avc)((avc)->v)->v_type == VDIR))
2241 osi_dnlc_purgedp(avc);
2242 } else {
2243 /* Something lost. Forget about performance, and go
2244 * back with a vcache write lock.
2245 */
2246 afs_CFileTruncate(file, 0)(*(afs_cacheType->truncate))((file), 0);
2247 afs_AdjustSize(tdc, 0);
2248 afs_CFileClose(file)(*(afs_cacheType->close))(file);
2249 osi_FreeLargeSpace(tsmall);
2250 tsmall = 0;
2251 ReleaseWriteLock(&tdc->lock)do { ; (&tdc->lock)->excl_locked &= ~2; if ((&
tdc->lock)->wait_states) Afs_Lock_ReleaseR(&tdc->
lock); (&tdc->lock)->pid_writer=0; } while (0)
;
2252 afs_PutDCache(tdc);
2253 tdc = 0;
2254 ReleaseReadLock(&avc->lock)do { ; if (!(--((&avc->lock)->readers_reading)) &&
(&avc->lock)->wait_states) Afs_Lock_ReleaseW(&
avc->lock) ; if ( (&avc->lock)->pid_last_reader ==
(((__curthread())->td_proc)->p_pid ) ) (&avc->lock
)->pid_last_reader =0; } while (0)
;
2255 slowPass = 1;
2256 goto RetryGetDCache;
2257 }
2258 }
2259
2260 } while (afs_Analyze
2261 (tc, rxconn, code, &avc->f.fid, areq,
2262 AFS_STATS_FS_RPCIDX_FETCHDATA0, SHARED_LOCK4, NULL((void *)0)));
2263
2264 /*
2265 * Locks held:
2266 * avc->lock(R) if setLocks && !slowPass
2267 * avc->lock(W) if !setLocks || slowPass
2268 * tdc->lock(W)
2269 */
2270
2271#ifndef AFS_NOSTATS
2272 /*
2273 * In the case of replicated access, jot down info on the number of
2274 * attempts it took before we got through or gave up.
2275 */
2276 if (fromReplica) {
2277 if (numFetchLoops <= 1)
2278 (accP->refFirstReplicaOK)++;
2279 if (numFetchLoops > accP->maxReplicasPerRef)
2280 accP->maxReplicasPerRef = numFetchLoops;
2281 }
2282#endif /* AFS_NOSTATS */
2283
2284 tdc->dflags &= ~DFFetching0x04;
2285 if (afs_osi_Wakeup(&tdc->validPos) == 0)
2286 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAKE, ICL_TYPE_STRING,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087894L), (1<<24)+((4)
<<18)+((7)<<12)+((2)<<6)+(7), (long)("/home/wollman/openafs/src/afs/afs_dcache.c"
), (long)(2287), (long)(tdc), (long)(tdc->dflags)) : 0)
2287 __FILE__, ICL_TYPE_INT32, __LINE__, ICL_TYPE_POINTER,(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087894L), (1<<24)+((4)
<<18)+((7)<<12)+((2)<<6)+(7), (long)("/home/wollman/openafs/src/afs/afs_dcache.c"
), (long)(2287), (long)(tdc), (long)(tdc->dflags)) : 0)
2288 tdc, ICL_TYPE_INT32, tdc->dflags)(((afs_iclSetp) && (afs_iclSetp->states & 2)) ?
afs_icl_Event4(afs_iclSetp, (701087894L), (1<<24)+((4)
<<18)+((7)<<12)+((2)<<6)+(7), (long)("/home/wollman/openafs/src/afs/afs_dcache.c"
), (long)(2287), (long)(tdc), (long)(tdc->dflags)) : 0)
;
2289 if (avc->execsOrWriters == 0)
2290 tdc->f.states &= ~DWriting8;
2291
2292 /* now, if code != 0, we have an error and should punt.
2293 * note that we have the vcache write lock, either because
2294 * !setLocks or slowPass.
2295 */
2296 if (code) {
2297 afs_CFileTruncate(file, 0)(*(afs_cacheType->truncate))((file), 0);
2298 afs_AdjustSize(tdc, 0);
2299 afs_CFileClose(file)(*(afs_cacheType->close))(file);
2300 ZapDCE(tdc)do { (tdc)->f.fid.Fid.Unique = 0; afs_indexUnique[(tdc)->
index] = 0; (tdc)->dflags |= 0x02; } while(0)
; /* sets DFEntryMod */
2301 if (vType(avc)((avc)->v)->v_type == VDIR) {
2302 DZap(tdc);
2303 }
2304 tdc->f.states &= ~(DRO1|DBackup2|DRW4);
2305 afs_DCMoveBucket(tdc, 0, 0);
2306 ReleaseWriteLock(&tdc->lock)do { ; (&tdc->lock)->excl_locked &= ~2; if ((&
tdc->lock)->wait_states) Afs_Lock_ReleaseR(&tdc->
lock); (&tdc->lock)->pid_writer=0; } while (0)
;
2307 afs_PutDCache(tdc);
2308 if (!afs_IsDynroot(avc)) {
2309 ObtainWriteLock(&afs_xcbhash, 454)do { ; if (!(&afs_xcbhash)->excl_locked && !(&
afs_xcbhash)->readers_reading) (&afs_xcbhash) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xcbhash, 2); (&afs_xcbhash
)->pid_writer = (((__curthread())->td_proc)->p_pid )
; (&afs_xcbhash)->src_indicator = 454; } while (0)
;
2310 afs_DequeueCallback(avc);
2311 avc->f.states &= ~(CStatd0x00000001 | CUnique0x00001000);
2312 ReleaseWriteLock(&afs_xcbhash)do { ; (&afs_xcbhash)->excl_locked &= ~2; if ((&
afs_xcbhash)->wait_states) Afs_Lock_ReleaseR(&afs_xcbhash
); (&afs_xcbhash)->pid_writer=0; } while (0)
;
2313 if (avc->f.fid.Fid.Vnode & 1 || (vType(avc)((avc)->v)->v_type == VDIR))
2314 osi_dnlc_purgedp(avc);
2315 /*
2316 * Locks held:
2317 * avc->lock(W); assert(!setLocks || slowPass)
2318 */
2319 osi_Assert(!setLocks || slowPass)(void)((!setLocks || slowPass) || (osi_AssertFailK( "!setLocks || slowPass"
, "/home/wollman/openafs/src/afs/afs_dcache.c", 2319), 0))
;
2320 }
2321 tdc = NULL((void *)0);
2322 goto done;
2323 }
2324
2325 /* otherwise we copy in the just-fetched info */
2326 afs_CFileClose(file)(*(afs_cacheType->close))(file);
2327 afs_AdjustSize(tdc, size); /* new size */
2328 /*
2329 * Copy appropriate fields into vcache. Status is
2330 * copied later where we selectively acquire the
2331 * vcache write lock.
2332 */
2333 if (slowPass)
2334 afs_ProcessFS(avc, &tsmall->OutStatus, areq);
2335 else
2336 setVcacheStatus = 1;
2337 hset64(tdc->f.versionNo, tsmall->OutStatus.dataVersionHigh,((tdc->f.versionNo).high = (tsmall->OutStatus.dataVersionHigh
), (tdc->f.versionNo).low = (tsmall->OutStatus.DataVersion
))
2338 tsmall->OutStatus.DataVersion)((tdc->f.versionNo).high = (tsmall->OutStatus.dataVersionHigh
), (tdc->f.versionNo).low = (tsmall->OutStatus.DataVersion
))
;
2339 tdc->dflags |= DFEntryMod0x02;
2340 afs_indexFlags[tdc->index] |= IFEverUsed1;
2341 ConvertWToSLock(&tdc->lock)do { ; (&tdc->lock)->excl_locked = 4; if((&tdc->
lock)->wait_states) Afs_Lock_ReleaseR(&tdc->lock); }
while (0)
;
2342 } /*Data version numbers don't match */
2343 else {
2344 /*
2345 * Data version numbers match.
2346 */
2347 afs_stats_cmperf.dcacheHits++;
2348 } /*Data version numbers match */
2349
2350 updateV2DC(setLocks, avc, tdc, 335); /* set hint */
2351 done:
2352 /*
2353 * Locks held:
2354 * avc->lock(R) if setLocks && !slowPass
2355 * avc->lock(W) if !setLocks || slowPass
2356 * tdc->lock(S) if tdc
2357 */
2358
2359 /*
2360 * See if this was a reference to a file in the local cell.
2361 */
2362 if (afs_IsPrimaryCellNum(avc->f.fid.Cell))
2363 afs_stats_cmperf.dlocalAccesses++;
2364 else
2365 afs_stats_cmperf.dremoteAccesses++;
2366
2367 /* Fix up LRU info */
2368
2369 if (tdc) {
2370 ObtainWriteLock(&afs_xdcache, 602)do { ; if (!(&afs_xdcache)->excl_locked && !(&
afs_xdcache)->readers_reading) (&afs_xdcache) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xdcache, 2); (&afs_xdcache
)->pid_writer = (((__curthread())->td_proc)->p_pid )
; (&afs_xdcache)->src_indicator = 602; } while (0)
;
2371 hset(afs_indexTimes[tdc->index], afs_indexCounter)((afs_indexTimes[tdc->index]) = (afs_indexCounter));
2372 hadd32(afs_indexCounter, 1)((void)((((afs_indexCounter).low ^ (int)(1)) & 0x80000000
) ? (((((afs_indexCounter).low + (int)(1)) & 0x80000000) ==
0) && (afs_indexCounter).high++) : (((afs_indexCounter
).low & (int)(1) & 0x80000000) && (afs_indexCounter
).high++)), (afs_indexCounter).low += (int)(1))
;
2373 ReleaseWriteLock(&afs_xdcache)do { ; (&afs_xdcache)->excl_locked &= ~2; if ((&
afs_xdcache)->wait_states) Afs_Lock_ReleaseR(&afs_xdcache
); (&afs_xdcache)->pid_writer=0; } while (0)
;
2374
2375 /* return the data */
2376 if (vType(avc)((avc)->v)->v_type == VDIR)
2377 *aoffset = abyte;
2378 else
2379 *aoffset = AFS_CHUNKOFFSET(abyte)((abyte < afs_FirstCSize) ? abyte : ((abyte - afs_FirstCSize
) & (afs_OtherCSize - 1)))
;
2380 *alen = (tdc->f.chunkBytes - *aoffset);
2381 ReleaseSharedLock(&tdc->lock)do { ; (&tdc->lock)->excl_locked &= ~(4 | 2); if
((&tdc->lock)->wait_states) Afs_Lock_ReleaseR(&
tdc->lock); (&tdc->lock)->pid_writer=0; } while (
0)
;
2382 }
2383
2384 /*
2385 * Locks held:
2386 * avc->lock(R) if setLocks && !slowPass
2387 * avc->lock(W) if !setLocks || slowPass
2388 */
2389
2390 /* Fix up the callback and status values in the vcache */
2391 doVcacheUpdate = 0;
2392 if (setLocks && !slowPass) {
2393 /* DCLOCKXXX
2394 *
2395 * This is our dirty little secret to parallel fetches.
2396 * We don't write-lock the vcache while doing the fetch,
2397 * but potentially we'll need to update the vcache after
2398 * the fetch is done.
2399 *
2400 * Drop the read lock and try to re-obtain the write
2401 * lock. If the vcache still has the same DV, it's
2402 * ok to go ahead and install the new data.
2403 */
2404 afs_hyper_t currentDV, statusDV;
2405
2406 hset(currentDV, avc->f.m.DataVersion)((currentDV) = (avc->f.m.DataVersion));
2407
2408 if (setNewCallback && avc->callback != newCallback)
2409 doVcacheUpdate = 1;
2410
2411 if (tsmall) {
2412 hset64(statusDV, tsmall->OutStatus.dataVersionHigh,((statusDV).high = (tsmall->OutStatus.dataVersionHigh), (statusDV
).low = (tsmall->OutStatus.DataVersion))
2413 tsmall->OutStatus.DataVersion)((statusDV).high = (tsmall->OutStatus.dataVersionHigh), (statusDV
).low = (tsmall->OutStatus.DataVersion))
;
2414
2415 if (setVcacheStatus && avc->f.m.Length != tsmall->OutStatus.Length)
2416 doVcacheUpdate = 1;
2417 if (setVcacheStatus && !hsame(currentDV, statusDV)((currentDV).low == (statusDV).low && (currentDV).high
== (statusDV).high)
)
2418 doVcacheUpdate = 1;
2419 }
2420
2421 ReleaseReadLock(&avc->lock)do { ; if (!(--((&avc->lock)->readers_reading)) &&
(&avc->lock)->wait_states) Afs_Lock_ReleaseW(&
avc->lock) ; if ( (&avc->lock)->pid_last_reader ==
(((__curthread())->td_proc)->p_pid ) ) (&avc->lock
)->pid_last_reader =0; } while (0)
;
2422
2423 if (doVcacheUpdate) {
2424 ObtainWriteLock(&avc->lock, 615)do { ; if (!(&avc->lock)->excl_locked && !(
&avc->lock)->readers_reading) (&avc->lock) ->
excl_locked = 2; else Afs_Lock_Obtain(&avc->lock, 2);
(&avc->lock)->pid_writer = (((__curthread())->td_proc
)->p_pid ); (&avc->lock)->src_indicator = 615; }
while (0)
;
2425 if (!hsame(avc->f.m.DataVersion, currentDV)((avc->f.m.DataVersion).low == (currentDV).low && (
avc->f.m.DataVersion).high == (currentDV).high)
) {
2426 /* We lose. Someone will beat us to it. */
2427 doVcacheUpdate = 0;
2428 ReleaseWriteLock(&avc->lock)do { ; (&avc->lock)->excl_locked &= ~2; if ((&
avc->lock)->wait_states) Afs_Lock_ReleaseR(&avc->
lock); (&avc->lock)->pid_writer=0; } while (0)
;
2429 }
2430 }
2431 }
2432
2433 /* With slow pass, we've already done all the updates */
2434 if (slowPass) {
2435 ReleaseWriteLock(&avc->lock)do { ; (&avc->lock)->excl_locked &= ~2; if ((&
avc->lock)->wait_states) Afs_Lock_ReleaseR(&avc->
lock); (&avc->lock)->pid_writer=0; } while (0)
;
2436 }
2437
2438 /* Check if we need to perform any last-minute fixes with a write-lock */
2439 if (!setLocks || doVcacheUpdate) {
2440 if (setNewCallback)
2441 avc->callback = newCallback;
2442 if (tsmall && setVcacheStatus)
2443 afs_ProcessFS(avc, &tsmall->OutStatus, areq);
2444 if (setLocks)
2445 ReleaseWriteLock(&avc->lock)do { ; (&avc->lock)->excl_locked &= ~2; if ((&
avc->lock)->wait_states) Afs_Lock_ReleaseR(&avc->
lock); (&avc->lock)->pid_writer=0; } while (0)
;
2446 }
2447
2448 if (tsmall)
2449 osi_FreeLargeSpace(tsmall);
2450
2451 return tdc;
2452} /*afs_GetDCache */
2453
2454
2455/*
2456 * afs_WriteThroughDSlots
2457 *
2458 * Description:
2459 * Sweep through the dcache slots and write out any modified
2460 * in-memory data back on to our caching store.
2461 *
2462 * Parameters:
2463 * None.
2464 *
2465 * Environment:
2466 * The afs_xdcache is write-locked through this whole affair.
2467 */
2468void
2469afs_WriteThroughDSlots(void)
2470{
2471 struct dcache *tdc;
2472 afs_int32 i, touchedit = 0;
2473
2474 struct afs_q DirtyQ, *tq;
2475
2476 AFS_STATCNT(afs_WriteThroughDSlots)((afs_cmstats.callInfo.C_afs_WriteThroughDSlots)++);
2477
2478 /*
2479 * Because of lock ordering, we can't grab dcache locks while
2480 * holding afs_xdcache. So we enter xdcache, get a reference
2481 * for every dcache entry, and exit xdcache.
2482 */
2483 ObtainWriteLock(&afs_xdcache, 283)do { ; if (!(&afs_xdcache)->excl_locked && !(&
afs_xdcache)->readers_reading) (&afs_xdcache) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xdcache, 2); (&afs_xdcache
)->pid_writer = (((__curthread())->td_proc)->p_pid )
; (&afs_xdcache)->src_indicator = 283; } while (0)
;
2484 QInit(&DirtyQ)((&DirtyQ)->prev = (&DirtyQ)->next = (&DirtyQ
))
;
2485 for (i = 0; i < afs_cacheFiles; i++) {
2486 tdc = afs_indexTable[i];
2487
2488 /* Grab tlock in case the existing refcount isn't zero */
2489 if (tdc && !(afs_indexFlags[i] & (IFFree2 | IFDiscarded64))) {
2490 ObtainWriteLock(&tdc->tlock, 623)do { ; if (!(&tdc->tlock)->excl_locked && !
(&tdc->tlock)->readers_reading) (&tdc->tlock
) -> excl_locked = 2; else Afs_Lock_Obtain(&tdc->tlock
, 2); (&tdc->tlock)->pid_writer = (((__curthread())
->td_proc)->p_pid ); (&tdc->tlock)->src_indicator
= 623; } while (0)
;
2491 tdc->refCount++;
2492 ReleaseWriteLock(&tdc->tlock)do { ; (&tdc->tlock)->excl_locked &= ~2; if ((&
tdc->tlock)->wait_states) Afs_Lock_ReleaseR(&tdc->
tlock); (&tdc->tlock)->pid_writer=0; } while (0)
;
2493
2494 QAdd(&DirtyQ, &tdc->dirty)((&tdc->dirty)->next = (&DirtyQ)->next, (&
tdc->dirty)->prev = (&DirtyQ), (&DirtyQ)->next
->prev = (&tdc->dirty), (&DirtyQ)->next = (&
tdc->dirty))
;
2495 }
2496 }
2497 ReleaseWriteLock(&afs_xdcache)do { ; (&afs_xdcache)->excl_locked &= ~2; if ((&
afs_xdcache)->wait_states) Afs_Lock_ReleaseR(&afs_xdcache
); (&afs_xdcache)->pid_writer=0; } while (0)
;
2498
2499 /*
2500 * Now, for each dcache entry we found, check if it's dirty.
2501 * If so, get write-lock, get afs_xdcache, which protects
2502 * afs_cacheInodep, and flush it. Don't forget to put back
2503 * the refcounts.
2504 */
2505
2506#define DQTODC(q)((struct dcache *)(((char *) (q)) - sizeof(struct afs_q))) ((struct dcache *)(((char *) (q)) - sizeof(struct afs_q)))
2507
2508 for (tq = DirtyQ.prev; tq != &DirtyQ; tq = QPrev(tq)((tq)->prev)) {
2509 tdc = DQTODC(tq)((struct dcache *)(((char *) (tq)) - sizeof(struct afs_q)));
2510 if (tdc->dflags & DFEntryMod0x02) {
2511 int wrLock;
2512
2513 wrLock = (0 == NBObtainWriteLock(&tdc->lock, 619)(((&tdc->lock)->excl_locked || (&tdc->lock)->
readers_reading) ? 35 : (((&tdc->lock) -> excl_locked
= 2), ((&tdc->lock)->pid_writer = (((__curthread()
)->td_proc)->p_pid )), ((&tdc->lock)->src_indicator
= 619), 0))
);
2514
2515 /* Now that we have the write lock, double-check */
2516 if (wrLock && (tdc->dflags & DFEntryMod0x02)) {
2517 tdc->dflags &= ~DFEntryMod0x02;
2518 ObtainWriteLock(&afs_xdcache, 620)do { ; if (!(&afs_xdcache)->excl_locked && !(&
afs_xdcache)->readers_reading) (&afs_xdcache) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xdcache, 2); (&afs_xdcache
)->pid_writer = (((__curthread())->td_proc)->p_pid )
; (&afs_xdcache)->src_indicator = 620; } while (0)
;
2519 afs_WriteDCache(tdc, 1);
2520 ReleaseWriteLock(&afs_xdcache)do { ; (&afs_xdcache)->excl_locked &= ~2; if ((&
afs_xdcache)->wait_states) Afs_Lock_ReleaseR(&afs_xdcache
); (&afs_xdcache)->pid_writer=0; } while (0)
;
2521 touchedit = 1;
2522 }
2523 if (wrLock)
2524 ReleaseWriteLock(&tdc->lock)do { ; (&tdc->lock)->excl_locked &= ~2; if ((&
tdc->lock)->wait_states) Afs_Lock_ReleaseR(&tdc->
lock); (&tdc->lock)->pid_writer=0; } while (0)
;
2525 }
2526
2527 afs_PutDCache(tdc);
2528 }
2529
2530 ObtainWriteLock(&afs_xdcache, 617)do { ; if (!(&afs_xdcache)->excl_locked && !(&
afs_xdcache)->readers_reading) (&afs_xdcache) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xdcache, 2); (&afs_xdcache
)->pid_writer = (((__curthread())->td_proc)->p_pid )
; (&afs_xdcache)->src_indicator = 617; } while (0)
;
2531 if (!touchedit && (cacheDiskType != AFS_FCACHE_TYPE_MEM0x1)) {
2532 /* Touch the file to make sure that the mtime on the file is kept
2533 * up-to-date to avoid losing cached files on cold starts because
2534 * their mtime seems old...
2535 */
2536 struct afs_fheader theader;
2537
2538 theader.magic = AFS_FHMAGIC0x7635abaf;
2539 theader.firstCSize = AFS_FIRSTCSIZE(afs_FirstCSize);
2540 theader.otherCSize = AFS_OTHERCSIZE(afs_OtherCSize);
2541 theader.version = AFS_CI_VERSION4;
2542 theader.dataSize = sizeof(struct fcache);
2543 afs_osi_Write(afs_cacheInodep, 0, &theader, sizeof(theader));
2544 }
2545 ReleaseWriteLock(&afs_xdcache)do { ; (&afs_xdcache)->excl_locked &= ~2; if ((&
afs_xdcache)->wait_states) Afs_Lock_ReleaseR(&afs_xdcache
); (&afs_xdcache)->pid_writer=0; } while (0)
;
2546}
2547
2548/*
2549 * afs_MemGetDSlot
2550 *
2551 * Description:
2552 * Return a pointer to an freshly initialized dcache entry using
2553 * a memory-based cache. The tlock will be read-locked.
2554 *
2555 * Parameters:
2556 * aslot : Dcache slot to look at.
2557 * tmpdc : Ptr to dcache entry.
2558 *
2559 * Environment:
2560 * Must be called with afs_xdcache write-locked.
2561 */
2562
2563struct dcache *
2564afs_MemGetDSlot(afs_int32 aslot, struct dcache *tmpdc)
2565{
2566 struct dcache *tdc;
2567 int existing = 0;
2568
2569 AFS_STATCNT(afs_MemGetDSlot)((afs_cmstats.callInfo.C_afs_MemGetDSlot)++);
2570 if (CheckLock(&afs_xdcache)((&afs_xdcache)->excl_locked? (int) -1 : (int) (&afs_xdcache
)->readers_reading)
!= -1)
2571 osi_Panic("getdslot nolock");
2572 if (aslot < 0 || aslot >= afs_cacheFiles)
2573 osi_Panic("getdslot slot %d (of %d)", aslot, afs_cacheFiles);
2574 tdc = afs_indexTable[aslot];
2575 if (tdc) {
2576 QRemove(&tdc->lruq)((&tdc->lruq)->next->prev = (&tdc->lruq)->
prev, (&tdc->lruq)->prev->next = (&tdc->lruq
)->next, (&tdc->lruq)->prev = ((void *)0), (&
tdc->lruq)->next = ((void *)0))
; /* move to queue head */
2577 QAdd(&afs_DLRU, &tdc->lruq)((&tdc->lruq)->next = (&afs_DLRU)->next, (&
tdc->lruq)->prev = (&afs_DLRU), (&afs_DLRU)->
next->prev = (&tdc->lruq), (&afs_DLRU)->next
= (&tdc->lruq))
;
2578 /* We're holding afs_xdcache, but get tlock in case refCount != 0 */
2579 ObtainWriteLock(&tdc->tlock, 624)do { ; if (!(&tdc->tlock)->excl_locked && !
(&tdc->tlock)->readers_reading) (&tdc->tlock
) -> excl_locked = 2; else Afs_Lock_Obtain(&tdc->tlock
, 2); (&tdc->tlock)->pid_writer = (((__curthread())
->td_proc)->p_pid ); (&tdc->tlock)->src_indicator
= 624; } while (0)
;
2580 tdc->refCount++;
2581 ConvertWToRLock(&tdc->tlock)do { ; (&tdc->tlock)->excl_locked &= ~(4 | 2); (
(&tdc->tlock)->readers_reading)++; (&tdc->tlock
)->pid_last_reader = (((__curthread())->td_proc)->p_pid
) ; (&tdc->tlock)->pid_writer = 0; Afs_Lock_ReleaseR
(&tdc->tlock); } while (0)
;
2582 return tdc;
2583 }
2584 if (tmpdc == NULL((void *)0)) {
2585 if (!afs_freeDSList)
2586 afs_GetDownDSlot(4);
2587 if (!afs_freeDSList) {
2588 /* none free, making one is better than a panic */
2589 afs_stats_cmperf.dcacheXAllocs++; /* count in case we have a leak */
2590 tdc = afs_osi_Alloc(sizeof(struct dcache));
2591 osi_Assert(tdc != NULL)(void)((tdc != ((void *)0)) || (osi_AssertFailK( "tdc != NULL"
, "/home/wollman/openafs/src/afs/afs_dcache.c", 2591), 0))
;
2592#ifdef KERNEL_HAVE_PIN
2593 pin((char *)tdc, sizeof(struct dcache)); /* XXX */
2594#endif
2595 } else {
2596 tdc = afs_freeDSList;
2597 afs_freeDSList = (struct dcache *)tdc->lruq.next;
2598 existing = 1;
2599 }
2600 tdc->dflags = 0; /* up-to-date, not in free q */
2601 tdc->mflags = 0;
2602 QAdd(&afs_DLRU, &tdc->lruq)((&tdc->lruq)->next = (&afs_DLRU)->next, (&
tdc->lruq)->prev = (&afs_DLRU), (&afs_DLRU)->
next->prev = (&tdc->lruq), (&afs_DLRU)->next
= (&tdc->lruq))
;
2603 if (tdc->lruq.prev == &tdc->lruq)
2604 osi_Panic("lruq 3");
2605 } else {
2606 tdc = tmpdc;
2607 tdc->f.states = 0;
2608 }
2609
2610 /* initialize entry */
2611 tdc->f.fid.Cell = 0;
2612 tdc->f.fid.Fid.Volume = 0;
2613 tdc->f.chunk = -1;
2614 hones(tdc->f.versionNo)((tdc->f.versionNo).low = 0xffffffff, (tdc->f.versionNo
).high = 0xffffffff)
;
2615 tdc->f.inode.mem = aslot;
2616 tdc->dflags |= DFEntryMod0x02;
2617 tdc->refCount = 1;
2618 tdc->index = aslot;
2619 afs_indexUnique[aslot] = tdc->f.fid.Fid.Unique;
2620
2621 if (existing) {
2622 osi_Assert(0 == NBObtainWriteLock(&tdc->lock, 674))(void)((0 == (((&tdc->lock)->excl_locked || (&tdc
->lock)->readers_reading) ? 35 : (((&tdc->lock) ->
excl_locked = 2), ((&tdc->lock)->pid_writer = (((__curthread
())->td_proc)->p_pid )), ((&tdc->lock)->src_indicator
= 674), 0))) || (osi_AssertFailK( "0 == NBObtainWriteLock(&tdc->lock, 674)"
, "/home/wollman/openafs/src/afs/afs_dcache.c", 2622), 0))
;
2623 osi_Assert(0 == NBObtainWriteLock(&tdc->mflock, 675))(void)((0 == (((&tdc->mflock)->excl_locked || (&
tdc->mflock)->readers_reading) ? 35 : (((&tdc->mflock
) -> excl_locked = 2), ((&tdc->mflock)->pid_writer
= (((__curthread())->td_proc)->p_pid )), ((&tdc->
mflock)->src_indicator = 675), 0))) || (osi_AssertFailK( "0 == NBObtainWriteLock(&tdc->mflock, 675)"
, "/home/wollman/openafs/src/afs/afs_dcache.c", 2623), 0))
;
2624 osi_Assert(0 == NBObtainWriteLock(&tdc->tlock, 676))(void)((0 == (((&tdc->tlock)->excl_locked || (&
tdc->tlock)->readers_reading) ? 35 : (((&tdc->tlock
) -> excl_locked = 2), ((&tdc->tlock)->pid_writer
= (((__curthread())->td_proc)->p_pid )), ((&tdc->
tlock)->src_indicator = 676), 0))) || (osi_AssertFailK( "0 == NBObtainWriteLock(&tdc->tlock, 676)"
, "/home/wollman/openafs/src/afs/afs_dcache.c", 2624), 0))
;
2625 }
2626
2627 AFS_RWLOCK_INIT(&tdc->lock, "dcache lock")Lock_Init(&tdc->lock);
2628 AFS_RWLOCK_INIT(&tdc->tlock, "dcache tlock")Lock_Init(&tdc->tlock);
2629 AFS_RWLOCK_INIT(&tdc->mflock, "dcache flock")Lock_Init(&tdc->mflock);
2630 ObtainReadLock(&tdc->tlock)do { ; if (!((&tdc->tlock)->excl_locked & 2)) (
(&tdc->tlock)->readers_reading)++; else Afs_Lock_Obtain
(&tdc->tlock, 1); (&tdc->tlock)->pid_last_reader
= (((__curthread())->td_proc)->p_pid ); } while (0)
;
2631
2632 if (tmpdc == NULL((void *)0))
2633 afs_indexTable[aslot] = tdc;
2634 return tdc;
2635
2636} /*afs_MemGetDSlot */
2637
/* Most recent dslot I/O error code and the time it was recorded —
 * presumably set by afs_UFSGetDSlot on a failed cache-file read to
 * rate-limit repeated warnings; the update site is not visible in this
 * chunk, so verify there (NOTE: review assumption). */
 2638unsigned int last_error = 0, lasterrtime = 0;
2639
2640/*
2641 * afs_UFSGetDSlot
2642 *
2643 * Description:
2644 * Return a pointer to an freshly initialized dcache entry using
2645 * a UFS-based disk cache. The dcache tlock will be read-locked.
2646 *
2647 * Parameters:
2648 * aslot : Dcache slot to look at.
2649 * tmpdc : Ptr to dcache entry.
2650 *
2651 * Environment:
2652 * afs_xdcache lock write-locked.
2653 */
2654struct dcache *
2655afs_UFSGetDSlot(afs_int32 aslot, struct dcache *tmpdc)
2656{
2657 afs_int32 code;
2658 struct dcache *tdc;
2659 int existing = 0;
2660 int entryok;
2661
2662 AFS_STATCNT(afs_UFSGetDSlot)((afs_cmstats.callInfo.C_afs_UFSGetDSlot)++);
2663 if (CheckLock(&afs_xdcache)((&afs_xdcache)->excl_locked? (int) -1 : (int) (&afs_xdcache
)->readers_reading)
!= -1)
2664 osi_Panic("getdslot nolock");
2665 if (aslot < 0 || aslot >= afs_cacheFiles)
2666 osi_Panic("getdslot slot %d (of %d)", aslot, afs_cacheFiles);
2667 tdc = afs_indexTable[aslot];
2668 if (tdc) {
2669 QRemove(&tdc->lruq)((&tdc->lruq)->next->prev = (&tdc->lruq)->
prev, (&tdc->lruq)->prev->next = (&tdc->lruq
)->next, (&tdc->lruq)->prev = ((void *)0), (&
tdc->lruq)->next = ((void *)0))
; /* move to queue head */
2670 QAdd(&afs_DLRU, &tdc->lruq)((&tdc->lruq)->next = (&afs_DLRU)->next, (&
tdc->lruq)->prev = (&afs_DLRU), (&afs_DLRU)->
next->prev = (&tdc->lruq), (&afs_DLRU)->next
= (&tdc->lruq))
;
2671 /* Grab tlock in case refCount != 0 */
2672 ObtainWriteLock(&tdc->tlock, 625)do { ; if (!(&tdc->tlock)->excl_locked && !
(&tdc->tlock)->readers_reading) (&tdc->tlock
) -> excl_locked = 2; else Afs_Lock_Obtain(&tdc->tlock
, 2); (&tdc->tlock)->pid_writer = (((__curthread())
->td_proc)->p_pid ); (&tdc->tlock)->src_indicator
= 625; } while (0)
;
2673 tdc->refCount++;
2674 ConvertWToRLock(&tdc->tlock)do { ; (&tdc->tlock)->excl_locked &= ~(4 | 2); (
(&tdc->tlock)->readers_reading)++; (&tdc->tlock
)->pid_last_reader = (((__curthread())->td_proc)->p_pid
) ; (&tdc->tlock)->pid_writer = 0; Afs_Lock_ReleaseR
(&tdc->tlock); } while (0)
;
2675 return tdc;
2676 }
2677 /* otherwise we should read it in from the cache file */
2678 /*
2679 * If we weren't passed an in-memory region to place the file info,
2680 * we have to allocate one.
2681 */
2682 if (tmpdc == NULL((void *)0)) {
2683 if (!afs_freeDSList)
2684 afs_GetDownDSlot(4);
2685 if (!afs_freeDSList) {
2686 /* none free, making one is better than a panic */
2687 afs_stats_cmperf.dcacheXAllocs++; /* count in case we have a leak */
2688 tdc = afs_osi_Alloc(sizeof(struct dcache));
2689 osi_Assert(tdc != NULL)(void)((tdc != ((void *)0)) || (osi_AssertFailK( "tdc != NULL"
, "/home/wollman/openafs/src/afs/afs_dcache.c", 2689), 0))
;
2690#ifdef KERNEL_HAVE_PIN
2691 pin((char *)tdc, sizeof(struct dcache)); /* XXX */
2692#endif
2693 } else {
2694 tdc = afs_freeDSList;
2695 afs_freeDSList = (struct dcache *)tdc->lruq.next;
2696 existing = 1;
2697 }
2698 tdc->dflags = 0; /* up-to-date, not in free q */
2699 tdc->mflags = 0;
2700 QAdd(&afs_DLRU, &tdc->lruq)((&tdc->lruq)->next = (&afs_DLRU)->next, (&
tdc->lruq)->prev = (&afs_DLRU), (&afs_DLRU)->
next->prev = (&tdc->lruq), (&afs_DLRU)->next
= (&tdc->lruq))
;
2701 if (tdc->lruq.prev == &tdc->lruq)
2702 osi_Panic("lruq 3");
2703 } else {
2704 tdc = tmpdc;
2705 tdc->f.states = 0;
2706 }
2707
2708 /*
2709 * Seek to the aslot'th entry and read it in.
2710 */
2711 code =
2712 afs_osi_Read(afs_cacheInodep,
2713 sizeof(struct fcache) * aslot +
2714 sizeof(struct afs_fheader), (char *)(&tdc->f),
2715 sizeof(struct fcache));
2716 entryok = 1;
2717 if (code != sizeof(struct fcache))
2718 entryok = 0;
2719 if (!afs_CellNumValid(tdc->f.fid.Cell))
2720 entryok = 0;
2721
2722 if (!entryok) {
2723 tdc->f.fid.Cell = 0;
2724 tdc->f.fid.Fid.Volume = 0;
2725 tdc->f.chunk = -1;
2726 hones(tdc->f.versionNo)((tdc->f.versionNo).low = 0xffffffff, (tdc->f.versionNo
).high = 0xffffffff)
;
2727 tdc->dflags |= DFEntryMod0x02;
2728#if defined(KERNEL_HAVE_UERROR)
2729 last_error = getuerror()u.u_error;
2730#endif
2731 lasterrtime = osi_Time()time_second;
2732 afs_indexUnique[aslot] = tdc->f.fid.Fid.Unique;
2733 tdc->f.states &= ~(DRO1|DBackup2|DRW4);
2734 afs_DCMoveBucket(tdc, 0, 0);
2735 } else {
2736 if (&tdc->f != 0) {
2737 if (tdc->f.states & DRO1) {
2738 afs_DCMoveBucket(tdc, 0, 2);
2739 } else if (tdc->f.states & DBackup2) {
2740 afs_DCMoveBucket(tdc, 0, 1);
2741 } else {
2742 afs_DCMoveBucket(tdc, 0, 1);
2743 }
2744 }
2745 }
2746 tdc->refCount = 1;
2747 tdc->index = aslot;
2748 if (tdc->f.chunk >= 0)
2749 tdc->validPos = AFS_CHUNKTOBASE(tdc->f.chunk)((tdc->f.chunk == 0) ? 0 : ((afs_size_t) afs_FirstCSize + (
(afs_size_t) (tdc->f.chunk - 1) << afs_LogChunk)))
+ tdc->f.chunkBytes;
2750 else
2751 tdc->validPos = 0;
2752
2753 if (existing) {
2754 osi_Assert(0 == NBObtainWriteLock(&tdc->lock, 674))(void)((0 == (((&tdc->lock)->excl_locked || (&tdc
->lock)->readers_reading) ? 35 : (((&tdc->lock) ->
excl_locked = 2), ((&tdc->lock)->pid_writer = (((__curthread
())->td_proc)->p_pid )), ((&tdc->lock)->src_indicator
= 674), 0))) || (osi_AssertFailK( "0 == NBObtainWriteLock(&tdc->lock, 674)"
, "/home/wollman/openafs/src/afs/afs_dcache.c", 2754), 0))
;
2755 osi_Assert(0 == NBObtainWriteLock(&tdc->mflock, 675))(void)((0 == (((&tdc->mflock)->excl_locked || (&
tdc->mflock)->readers_reading) ? 35 : (((&tdc->mflock
) -> excl_locked = 2), ((&tdc->mflock)->pid_writer
= (((__curthread())->td_proc)->p_pid )), ((&tdc->
mflock)->src_indicator = 675), 0))) || (osi_AssertFailK( "0 == NBObtainWriteLock(&tdc->mflock, 675)"
, "/home/wollman/openafs/src/afs/afs_dcache.c", 2755), 0))
;
2756 osi_Assert(0 == NBObtainWriteLock(&tdc->tlock, 676))(void)((0 == (((&tdc->tlock)->excl_locked || (&
tdc->tlock)->readers_reading) ? 35 : (((&tdc->tlock
) -> excl_locked = 2), ((&tdc->tlock)->pid_writer
= (((__curthread())->td_proc)->p_pid )), ((&tdc->
tlock)->src_indicator = 676), 0))) || (osi_AssertFailK( "0 == NBObtainWriteLock(&tdc->tlock, 676)"
, "/home/wollman/openafs/src/afs/afs_dcache.c", 2756), 0))
;
2757 }
2758
2759 AFS_RWLOCK_INIT(&tdc->lock, "dcache lock")Lock_Init(&tdc->lock);
2760 AFS_RWLOCK_INIT(&tdc->tlock, "dcache tlock")Lock_Init(&tdc->tlock);
2761 AFS_RWLOCK_INIT(&tdc->mflock, "dcache flock")Lock_Init(&tdc->mflock);
2762 ObtainReadLock(&tdc->tlock)do { ; if (!((&tdc->tlock)->excl_locked & 2)) (
(&tdc->tlock)->readers_reading)++; else Afs_Lock_Obtain
(&tdc->tlock, 1); (&tdc->tlock)->pid_last_reader
= (((__curthread())->td_proc)->p_pid ); } while (0)
;
2763
2764 /*
2765 * If we didn't read into a temporary dcache region, update the
2766 * slot pointer table.
2767 */
2768 if (tmpdc == NULL((void *)0))
2769 afs_indexTable[aslot] = tdc;
2770 return tdc;
2771
2772} /*afs_UFSGetDSlot */
2773
2774
2775
2776/*!
2777 * Write a particular dcache entry back to its home in the
2778 * CacheInfo file.
2779 *
2780 * \param adc Pointer to the dcache entry to write.
2781 * \param atime If true, set the modtime on the file to the current time.
2782 *
2783 * \note Environment:
2784 * Must be called with the afs_xdcache lock at least read-locked,
2785 * and dcache entry at least read-locked.
2786 * The reference count is not changed.
2787 */
2788
2789int
2790afs_WriteDCache(struct dcache *adc, int atime)
2791{
2792 afs_int32 code;
2793
2794 if (cacheDiskType == AFS_FCACHE_TYPE_MEM0x1)
2795 return 0;
2796 AFS_STATCNT(afs_WriteDCache)((afs_cmstats.callInfo.C_afs_WriteDCache)++);
2797 osi_Assert(WriteLocked(&afs_xdcache))(void)((((&afs_xdcache)->excl_locked & 2)) || (osi_AssertFailK
( "WriteLocked(&afs_xdcache)" , "/home/wollman/openafs/src/afs/afs_dcache.c"
, 2797), 0))
;
2798 if (atime)
2799 adc->f.modTime = osi_Time()time_second;
2800 /*
2801 * Seek to the right dcache slot and write the in-memory image out to disk.
2802 */
2803 afs_cellname_write();
2804 code =
2805 afs_osi_Write(afs_cacheInodep,
2806 sizeof(struct fcache) * adc->index +
2807 sizeof(struct afs_fheader), (char *)(&adc->f),
2808 sizeof(struct fcache));
2809 if (code != sizeof(struct fcache))
2810 return EIO5;
2811 return 0;
2812}
2813
2814
2815
2816/*!
2817 * Wake up users of a particular file waiting for stores to take
2818 * place.
2819 *
2820 * \param avc Ptr to related vcache entry.
2821 *
2822 * \note Environment:
2823 * Nothing interesting.
2824 */
2825int
2826afs_wakeup(struct vcache *avc)
2827{
2828 int i;
2829 struct brequest *tb;
2830 tb = afs_brs;
2831 AFS_STATCNT(afs_wakeup)((afs_cmstats.callInfo.C_afs_wakeup)++);
2832 for (i = 0; i < NBRS15; i++, tb++) {
2833 /* if request is valid and for this file, we've found it */
2834 if (tb->refCount > 0 && avc == tb->vc) {
2835
2836 /*
2837 * If CSafeStore is on, then we don't awaken the guy
2838 * waiting for the store until the whole store has finished.
2839 * Otherwise, we do it now. Note that if CSafeStore is on,
2840 * the BStore routine actually wakes up the user, instead
2841 * of us.
2842 * I think this is redundant now because this sort of thing
2843 * is already being handled by the higher-level code.
2844 */
2845 if ((avc->f.states & CSafeStore0x00000040) == 0) {
2846 tb->code = 0;
2847 tb->flags |= BUVALID2;
2848 if (tb->flags & BUWAIT4) {
2849 tb->flags &= ~BUWAIT4;
2850 afs_osi_Wakeup(tb);
2851 }
2852 }
2853 break;
2854 }
2855 }
2856 return 0;
2857}
2858
2859
2860/*!
2861 * Given a file name and inode, set up that file to be an
2862 * active member in the AFS cache. This also involves checking
2863 * the usability of its data.
2864 *
2865 * \param afile Name of the cache file to initialize.
2866 * \param ainode Inode of the file.
2867 *
2868 * \note Environment:
2869 * This function is called only during initialization.
2870 */
2871int
2872afs_InitCacheFile(char *afile, ino_t ainode)
2873{
2874 afs_int32 code;
2875 afs_int32 index;
2876 int fileIsBad;
2877 struct osi_file *tfile;
2878 struct osi_stat tstat;
2879 struct dcache *tdc;
2880
2881 AFS_STATCNT(afs_InitCacheFile)((afs_cmstats.callInfo.C_afs_InitCacheFile)++);
2882 index = afs_stats_cmperf.cacheNumEntries;
2883 if (index >= afs_cacheFiles)
2884 return EINVAL22;
2885
2886 ObtainWriteLock(&afs_xdcache, 282)do { ; if (!(&afs_xdcache)->excl_locked && !(&
afs_xdcache)->readers_reading) (&afs_xdcache) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xdcache, 2); (&afs_xdcache
)->pid_writer = (((__curthread())->td_proc)->p_pid )
; (&afs_xdcache)->src_indicator = 282; } while (0)
;
2887 tdc = afs_GetDSlot(index, NULL)(*(afs_cacheType->GetDSlot))(index, ((void *)0));
2888 ReleaseReadLock(&tdc->tlock)do { ; if (!(--((&tdc->tlock)->readers_reading)) &&
(&tdc->tlock)->wait_states) Afs_Lock_ReleaseW(&
tdc->tlock) ; if ( (&tdc->tlock)->pid_last_reader
== (((__curthread())->td_proc)->p_pid ) ) (&tdc->
tlock)->pid_last_reader =0; } while (0)
;
2889 ReleaseWriteLock(&afs_xdcache)do { ; (&afs_xdcache)->excl_locked &= ~2; if ((&
afs_xdcache)->wait_states) Afs_Lock_ReleaseR(&afs_xdcache
); (&afs_xdcache)->pid_writer=0; } while (0)
;
2890
2891 ObtainWriteLock(&tdc->lock, 621)do { ; if (!(&tdc->lock)->excl_locked && !(
&tdc->lock)->readers_reading) (&tdc->lock) ->
excl_locked = 2; else Afs_Lock_Obtain(&tdc->lock, 2);
(&tdc->lock)->pid_writer = (((__curthread())->td_proc
)->p_pid ); (&tdc->lock)->src_indicator = 621; }
while (0)
;
2892 ObtainWriteLock(&afs_xdcache, 622)do { ; if (!(&afs_xdcache)->excl_locked && !(&
afs_xdcache)->readers_reading) (&afs_xdcache) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xdcache, 2); (&afs_xdcache
)->pid_writer = (((__curthread())->td_proc)->p_pid )
; (&afs_xdcache)->src_indicator = 622; } while (0)
;
2893 if (afile) {
2894 code = afs_LookupInodeByPath(afile, &tdc->f.inode.ufs, NULL((void *)0));
2895 if (code) {
2896 ReleaseWriteLock(&afs_xdcache)do { ; (&afs_xdcache)->excl_locked &= ~2; if ((&
afs_xdcache)->wait_states) Afs_Lock_ReleaseR(&afs_xdcache
); (&afs_xdcache)->pid_writer=0; } while (0)
;
2897 ReleaseWriteLock(&tdc->lock)do { ; (&tdc->lock)->excl_locked &= ~2; if ((&
tdc->lock)->wait_states) Afs_Lock_ReleaseR(&tdc->
lock); (&tdc->lock)->pid_writer=0; } while (0)
;
2898 afs_PutDCache(tdc);
2899 return code;
2900 }
2901 } else {
2902 /* Add any other 'complex' inode types here ... */
2903#if !defined(AFS_LINUX26_ENV) && !defined(AFS_CACHE_VNODE_PATH)
2904 tdc->f.inode.ufs = ainode;
2905#else
2906 osi_Panic("Can't init cache with inode numbers when complex inodes are "
2907 "in use\n");
2908#endif
2909 }
2910 fileIsBad = 0;
2911 if ((tdc->f.states & DWriting8) || tdc->f.fid.Fid.Volume == 0)
2912 fileIsBad = 1;
2913 tfile = osi_UFSOpen(&tdc->f.inode);
2914 code = afs_osi_Stat(tfile, &tstat);
2915 if (code)
2916 osi_Panic("initcachefile stat");
2917
2918 /*
2919 * If file size doesn't match the cache info file, it's probably bad.
2920 */
2921 if (tdc->f.chunkBytes != tstat.size)
2922 fileIsBad = 1;
2923 tdc->f.chunkBytes = 0;
2924
2925 /*
2926 * If file changed within T (120?) seconds of cache info file, it's
2927 * probably bad. In addition, if slot changed within last T seconds,
2928 * the cache info file may be incorrectly identified, and so slot
2929 * may be bad.
2930 */
2931 if (cacheInfoModTime < tstat.mtime + 120)
2932 fileIsBad = 1;
2933 if (cacheInfoModTime < tdc->f.modTime + 120)
2934 fileIsBad = 1;
2935 /* In case write through is behind, make sure cache items entry is
2936 * at least as new as the chunk.
2937 */
2938 if (tdc->f.modTime < tstat.mtime)
2939 fileIsBad = 1;
2940 if (fileIsBad) {
2941 tdc->f.fid.Fid.Volume = 0; /* not in the hash table */
2942 if (tstat.size != 0)
2943 osi_UFSTruncate(tfile, 0);
2944 tdc->f.states &= ~(DRO1|DBackup2|DRW4);
2945 afs_DCMoveBucket(tdc, 0, 0);
2946 /* put entry in free cache slot list */
2947 afs_dvnextTbl[tdc->index] = afs_freeDCList;
2948 afs_freeDCList = index;
2949 afs_freeDCCount++;
2950 afs_indexFlags[index] |= IFFree2;
2951 afs_indexUnique[index] = 0;
2952 } else {
2953 /*
2954 * We must put this entry in the appropriate hash tables.
2955 * Note that i is still set from the above DCHash call
2956 */
2957 code = DCHash(&tdc->f.fid, tdc->f.chunk)((((&tdc->f.fid)->Fid.Vnode + (&tdc->f.fid)->
Fid.Volume + (tdc->f.chunk))) & (afs_dhashsize-1))
;
2958 afs_dcnextTbl[tdc->index] = afs_dchashTbl[code];
2959 afs_dchashTbl[code] = tdc->index;
2960 code = DVHash(&tdc->f.fid)((((&tdc->f.fid)->Fid.Vnode + (&tdc->f.fid)->
Fid.Volume )) & (afs_dhashsize-1))
;
2961 afs_dvnextTbl[tdc->index] = afs_dvhashTbl[code];
2962 afs_dvhashTbl[code] = tdc->index;
2963 afs_AdjustSize(tdc, tstat.size); /* adjust to new size */
2964 if (tstat.size > 0)
2965 /* has nontrivial amt of data */
2966 afs_indexFlags[index] |= IFEverUsed1;
2967 afs_stats_cmperf.cacheFilesReused++;
2968 /*
2969 * Initialize index times to file's mod times; init indexCounter
2970 * to max thereof
2971 */
2972 hset32(afs_indexTimes[index], tstat.atime)((afs_indexTimes[index]).high = 0, (afs_indexTimes[index]).low
= (tstat.atime))
;
2973 if (hgetlo(afs_indexCounter)((afs_indexCounter).low) < tstat.atime) {
2974 hset32(afs_indexCounter, tstat.atime)((afs_indexCounter).high = 0, (afs_indexCounter).low = (tstat
.atime))
;
2975 }
2976 afs_indexUnique[index] = tdc->f.fid.Fid.Unique;
2977 } /*File is not bad */
2978
2979 osi_UFSClose(tfile);
2980 tdc->f.states &= ~DWriting8;
2981 tdc->dflags &= ~DFEntryMod0x02;
2982 /* don't set f.modTime; we're just cleaning up */
2983 afs_WriteDCache(tdc, 0);
2984 ReleaseWriteLock(&afs_xdcache)do { ; (&afs_xdcache)->excl_locked &= ~2; if ((&
afs_xdcache)->wait_states) Afs_Lock_ReleaseR(&afs_xdcache
); (&afs_xdcache)->pid_writer=0; } while (0)
;
2985 ReleaseWriteLock(&tdc->lock)do { ; (&tdc->lock)->excl_locked &= ~2; if ((&
tdc->lock)->wait_states) Afs_Lock_ReleaseR(&tdc->
lock); (&tdc->lock)->pid_writer=0; } while (0)
;
2986 afs_PutDCache(tdc);
2987 afs_stats_cmperf.cacheNumEntries++;
2988 return 0;
2989}
2990
2991
2992/*Max # of struct dcache's resident at any time*/
2993/*
2994 * If 'dchint' is enabled then in-memory dcache min is increased because of
2995 * crashes...
2996 */
2997#define DDSIZE200 200
2998
2999/*!
3000 * Initialize dcache related variables.
3001 *
3002 * \param afiles
3003 * \param ablocks
3004 * \param aDentries
3005 * \param achunk
3006 * \param aflags
3007 *
3008 */
3009void
3010afs_dcacheInit(int afiles, int ablocks, int aDentries, int achunk, int aflags)
3011{
3012 struct dcache *tdp;
3013 int i;
3014 int code;
3015
3016 afs_freeDCList = NULLIDX(-1);
3017 afs_discardDCList = NULLIDX(-1);
3018 afs_freeDCCount = 0;
3019 afs_freeDSList = NULL((void *)0);
3020 hzero(afs_indexCounter)((afs_indexCounter).low = 0, (afs_indexCounter).high = 0);
3021
3022 LOCK_INIT(&afs_xdcache, "afs_xdcache")Lock_Init(&afs_xdcache);
3023
3024 /*
3025 * Set chunk size
3026 */
3027 if (achunk) {
3028 if (achunk < 0 || achunk > 30)
3029 achunk = 13; /* Use default */
3030 AFS_SETCHUNKSIZE(achunk){ afs_LogChunk = achunk; afs_FirstCSize = afs_OtherCSize = (1
<< achunk); }
;
3031 }
3032
3033 if (!aDentries)
3034 aDentries = DDSIZE200;
3035
3036 if (aflags & AFSCALL_INIT_MEMCACHE0x1) {
3037 /*
3038 * Use a memory cache instead of a disk cache
3039 */
3040 cacheDiskType = AFS_FCACHE_TYPE_MEM0x1;
3041 afs_cacheType = &afs_MemCacheOps;
3042 afiles = (afiles < aDentries) ? afiles : aDentries; /* min */
3043 ablocks = afiles * (AFS_FIRSTCSIZE(afs_FirstCSize) / 1024);
3044 /* ablocks is reported in 1K blocks */
3045 code = afs_InitMemCache(afiles, AFS_FIRSTCSIZE(afs_FirstCSize), aflags);
3046 if (code != 0) {
3047 afs_warn("afsd: memory cache too large for available memory.\n");
3048 afs_warn("afsd: AFS files cannot be accessed.\n\n");
3049 dcacheDisabled = 1;
3050 afiles = ablocks = 0;
3051 } else
3052 afs_warn("Memory cache: Allocating %d dcache entries...",
3053 aDentries);
3054 } else {
3055 cacheDiskType = AFS_FCACHE_TYPE_UFS0x0;
3056 afs_cacheType = &afs_UfsCacheOps;
3057 }
3058
3059 if (aDentries > 512)
3060 afs_dhashsize = 2048;
3061 /* initialize hash tables */
3062 afs_dvhashTbl = afs_osi_Alloc(afs_dhashsize * sizeof(afs_int32));
3063 osi_Assert(afs_dvhashTbl != NULL)(void)((afs_dvhashTbl != ((void *)0)) || (osi_AssertFailK( "afs_dvhashTbl != NULL"
, "/home/wollman/openafs/src/afs/afs_dcache.c", 3063), 0))
;
3064 afs_dchashTbl = afs_osi_Alloc(afs_dhashsize * sizeof(afs_int32));
3065 osi_Assert(afs_dchashTbl != NULL)(void)((afs_dchashTbl != ((void *)0)) || (osi_AssertFailK( "afs_dchashTbl != NULL"
, "/home/wollman/openafs/src/afs/afs_dcache.c", 3065), 0))
;
3066 for (i = 0; i < afs_dhashsize; i++) {
3067 afs_dvhashTbl[i] = NULLIDX(-1);
3068 afs_dchashTbl[i] = NULLIDX(-1);
3069 }
3070 afs_dvnextTbl = afs_osi_Alloc(afiles * sizeof(afs_int32));
3071 osi_Assert(afs_dvnextTbl != NULL)(void)((afs_dvnextTbl != ((void *)0)) || (osi_AssertFailK( "afs_dvnextTbl != NULL"
, "/home/wollman/openafs/src/afs/afs_dcache.c", 3071), 0))
;
3072 afs_dcnextTbl = afs_osi_Alloc(afiles * sizeof(afs_int32));
3073 osi_Assert(afs_dcnextTbl != NULL)(void)((afs_dcnextTbl != ((void *)0)) || (osi_AssertFailK( "afs_dcnextTbl != NULL"
, "/home/wollman/openafs/src/afs/afs_dcache.c", 3073), 0))
;
3074 for (i = 0; i < afiles; i++) {
3075 afs_dvnextTbl[i] = NULLIDX(-1);
3076 afs_dcnextTbl[i] = NULLIDX(-1);
3077 }
3078
3079 /* Allocate and zero the pointer array to the dcache entries */
3080 afs_indexTable = afs_osi_Alloc(sizeof(struct dcache *) * afiles);
3081 osi_Assert(afs_indexTable != NULL)(void)((afs_indexTable != ((void *)0)) || (osi_AssertFailK( "afs_indexTable != NULL"
, "/home/wollman/openafs/src/afs/afs_dcache.c", 3081), 0))
;
3082 memset(afs_indexTable, 0, sizeof(struct dcache *) * afiles);
3083 afs_indexTimes = afs_osi_Alloc(afiles * sizeof(afs_hyper_t));
3084 osi_Assert(afs_indexTimes != NULL)(void)((afs_indexTimes != ((void *)0)) || (osi_AssertFailK( "afs_indexTimes != NULL"
, "/home/wollman/openafs/src/afs/afs_dcache.c", 3084), 0))
;
3085 memset(afs_indexTimes, 0, afiles * sizeof(afs_hyper_t));
3086 afs_indexUnique = afs_osi_Alloc(afiles * sizeof(afs_uint32));
3087 osi_Assert(afs_indexUnique != NULL)(void)((afs_indexUnique != ((void *)0)) || (osi_AssertFailK( "afs_indexUnique != NULL"
, "/home/wollman/openafs/src/afs/afs_dcache.c", 3087), 0))
;
3088 memset(afs_indexUnique, 0, afiles * sizeof(afs_uint32));
3089 afs_indexFlags = afs_osi_Alloc(afiles * sizeof(u_char));
3090 osi_Assert(afs_indexFlags != NULL)(void)((afs_indexFlags != ((void *)0)) || (osi_AssertFailK( "afs_indexFlags != NULL"
, "/home/wollman/openafs/src/afs/afs_dcache.c", 3090), 0))
;
3091 memset(afs_indexFlags, 0, afiles * sizeof(char));
3092
3093 /* Allocate and thread the struct dcache entries themselves */
3094 tdp = afs_Initial_freeDSList =
3095 afs_osi_Alloc(aDentries * sizeof(struct dcache));
3096 osi_Assert(tdp != NULL)(void)((tdp != ((void *)0)) || (osi_AssertFailK( "tdp != NULL"
, "/home/wollman/openafs/src/afs/afs_dcache.c", 3096), 0))
;
3097 memset(tdp, 0, aDentries * sizeof(struct dcache));
3098#ifdef KERNEL_HAVE_PIN
3099 pin((char *)afs_indexTable, sizeof(struct dcache *) * afiles); /* XXX */
3100 pin((char *)afs_indexTimes, sizeof(afs_hyper_t) * afiles); /* XXX */
3101 pin((char *)afs_indexFlags, sizeof(char) * afiles); /* XXX */
3102 pin((char *)afs_indexUnique, sizeof(afs_int32) * afiles); /* XXX */
3103 pin((char *)tdp, aDentries * sizeof(struct dcache)); /* XXX */
3104 pin((char *)afs_dvhashTbl, sizeof(afs_int32) * afs_dhashsize); /* XXX */
3105 pin((char *)afs_dchashTbl, sizeof(afs_int32) * afs_dhashsize); /* XXX */
3106 pin((char *)afs_dcnextTbl, sizeof(afs_int32) * afiles); /* XXX */
3107 pin((char *)afs_dvnextTbl, sizeof(afs_int32) * afiles); /* XXX */
3108#endif
3109
3110 afs_freeDSList = &tdp[0];
3111 for (i = 0; i < aDentries - 1; i++) {
3112 tdp[i].lruq.next = (struct afs_q *)(&tdp[i + 1]);
3113 AFS_RWLOCK_INIT(&tdp[i].lock, "dcache lock")Lock_Init(&tdp[i].lock);
3114 AFS_RWLOCK_INIT(&tdp[i].tlock, "dcache tlock")Lock_Init(&tdp[i].tlock);
3115 AFS_RWLOCK_INIT(&tdp[i].mflock, "dcache flock")Lock_Init(&tdp[i].mflock);
3116 }
3117 tdp[aDentries - 1].lruq.next = (struct afs_q *)0;
3118 AFS_RWLOCK_INIT(&tdp[aDentries - 1].lock, "dcache lock")Lock_Init(&tdp[aDentries - 1].lock);
3119 AFS_RWLOCK_INIT(&tdp[aDentries - 1].tlock, "dcache tlock")Lock_Init(&tdp[aDentries - 1].tlock);
3120 AFS_RWLOCK_INIT(&tdp[aDentries - 1].mflock, "dcache flock")Lock_Init(&tdp[aDentries - 1].mflock);
3121
3122 afs_stats_cmperf.cacheBlocksOrig = afs_stats_cmperf.cacheBlocksTotal =
3123 afs_cacheBlocks = ablocks;
3124 afs_ComputeCacheParms(); /* compute parms based on cache size */
3125
3126 afs_dcentries = aDentries;
3127 afs_blocksUsed = 0;
3128 afs_stats_cmperf.cacheBucket0_Discarded =
3129 afs_stats_cmperf.cacheBucket1_Discarded =
3130 afs_stats_cmperf.cacheBucket2_Discarded = 0;
3131 afs_DCSizeInit();
3132 QInit(&afs_DLRU)((&afs_DLRU)->prev = (&afs_DLRU)->next = (&
afs_DLRU))
;
3133}
3134
3135/*!
3136 * Shuts down the cache.
3137 *
3138 */
3139void
3140shutdown_dcache(void)
3141{
3142 int i;
3143
3144#ifdef AFS_CACHE_VNODE_PATH
3145 if (cacheDiskType != AFS_FCACHE_TYPE_MEM0x1) {
3146 struct dcache *tdc;
3147 for (i = 0; i < afs_cacheFiles; i++) {
3148 tdc = afs_indexTable[i];
3149 if (tdc) {
3150 afs_osi_FreeStr(tdc->f.inode.ufs);
3151 }
3152 }
3153 }
3154#endif
3155
3156 afs_osi_Free(afs_dvnextTbl, afs_cacheFiles * sizeof(afs_int32));
3157 afs_osi_Free(afs_dcnextTbl, afs_cacheFiles * sizeof(afs_int32));
3158 afs_osi_Free(afs_indexTable, afs_cacheFiles * sizeof(struct dcache *));
3159 afs_osi_Free(afs_indexTimes, afs_cacheFiles * sizeof(afs_hyper_t));
3160 afs_osi_Free(afs_indexUnique, afs_cacheFiles * sizeof(afs_uint32));
3161 afs_osi_Free(afs_indexFlags, afs_cacheFiles * sizeof(u_char));
3162 afs_osi_Free(afs_Initial_freeDSList,
3163 afs_dcentries * sizeof(struct dcache));
3164#ifdef KERNEL_HAVE_PIN
3165 unpin((char *)afs_dcnextTbl, afs_cacheFiles * sizeof(afs_int32));
3166 unpin((char *)afs_dvnextTbl, afs_cacheFiles * sizeof(afs_int32));
3167 unpin((char *)afs_indexTable, afs_cacheFiles * sizeof(struct dcache *));
3168 unpin((char *)afs_indexTimes, afs_cacheFiles * sizeof(afs_hyper_t));
3169 unpin((char *)afs_indexUnique, afs_cacheFiles * sizeof(afs_uint32));
3170 unpin((u_char *) afs_indexFlags, afs_cacheFiles * sizeof(u_char));
3171 unpin(afs_Initial_freeDSList, afs_dcentries * sizeof(struct dcache));
3172#endif
3173
3174
3175 for (i = 0; i < afs_dhashsize; i++) {
3176 afs_dvhashTbl[i] = NULLIDX(-1);
3177 afs_dchashTbl[i] = NULLIDX(-1);
3178 }
3179
3180 afs_osi_Free(afs_dvhashTbl, afs_dhashsize * sizeof(afs_int32));
3181 afs_osi_Free(afs_dchashTbl, afs_dhashsize * sizeof(afs_int32));
3182
3183 afs_blocksUsed = afs_dcentries = 0;
3184 afs_stats_cmperf.cacheBucket0_Discarded =
3185 afs_stats_cmperf.cacheBucket1_Discarded =
3186 afs_stats_cmperf.cacheBucket2_Discarded = 0;
3187 hzero(afs_indexCounter)((afs_indexCounter).low = 0, (afs_indexCounter).high = 0);
3188
3189 afs_freeDCCount = 0;
3190 afs_freeDCList = NULLIDX(-1);
3191 afs_discardDCList = NULLIDX(-1);
3192 afs_freeDSList = afs_Initial_freeDSList = 0;
3193
3194 LOCK_INIT(&afs_xdcache, "afs_xdcache")Lock_Init(&afs_xdcache);
3195 QInit(&afs_DLRU)((&afs_DLRU)->prev = (&afs_DLRU)->next = (&
afs_DLRU))
;
3196
3197}
3198
3199/*!
3200 * Get a dcache ready for writing, respecting the current cache size limits
3201 *
3202 * len is required because afs_GetDCache with flag == 4 expects the length
3203 * field to be filled. It decides from this whether it's necessary to fetch
3204 * data into the chunk before writing or not (when the whole chunk is
3205 * overwritten!).
3206 *
3207 * \param avc The vcache to fetch a dcache for
3208 * \param filePos The start of the section to be written
3209 * \param len The length of the section to be written
3210 * \param areq
3211 * \param noLock
3212 *
3213 * \return If successful, a reference counted dcache with tdc->lock held. Lock
3214 * must be released and afs_PutDCache() called to free dcache.
3215 * NULL on failure
3216 *
3217 * \note avc->lock must be held on entry. Function may release and reobtain
3218 * avc->lock and GLOCK.
3219 */
3220
3221struct dcache *
3222afs_ObtainDCacheForWriting(struct vcache *avc, afs_size_t filePos,
3223 afs_size_t len, struct vrequest *areq,
3224 int noLock)
3225{
3226 struct dcache *tdc = NULL((void *)0);
3227 afs_size_t offset;
3228
3229 /* read the cached info */
3230 if (noLock) {
3231 tdc = afs_FindDCache(avc, filePos);
3232 if (tdc)
3233 ObtainWriteLock(&tdc->lock, 657)do { ; if (!(&tdc->lock)->excl_locked && !(
&tdc->lock)->readers_reading) (&tdc->lock) ->
excl_locked = 2; else Afs_Lock_Obtain(&tdc->lock, 2);
(&tdc->lock)->pid_writer = (((__curthread())->td_proc
)->p_pid ); (&tdc->lock)->src_indicator = 657; }
while (0)
;
3234 } else if (afs_blocksUsed >
3235 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)((afs_cacheBlocks & 0xffe00000) ? ((afs_cacheBlocks) / 100
* (98)) : ((98) * (afs_cacheBlocks) / 100))
) {
3236 tdc = afs_FindDCache(avc, filePos);
3237 if (tdc) {
3238 ObtainWriteLock(&tdc->lock, 658)do { ; if (!(&tdc->lock)->excl_locked && !(
&tdc->lock)->readers_reading) (&tdc->lock) ->
excl_locked = 2; else Afs_Lock_Obtain(&tdc->lock, 2);
(&tdc->lock)->pid_writer = (((__curthread())->td_proc
)->p_pid ); (&tdc->lock)->src_indicator = 658; }
while (0)
;
3239 if (!hsame(tdc->f.versionNo, avc->f.m.DataVersion)((tdc->f.versionNo).low == (avc->f.m.DataVersion).low &&
(tdc->f.versionNo).high == (avc->f.m.DataVersion).high
)
3240 || (tdc->dflags & DFFetching0x04)) {
3241 ReleaseWriteLock(&tdc->lock)do { ; (&tdc->lock)->excl_locked &= ~2; if ((&
tdc->lock)->wait_states) Afs_Lock_ReleaseR(&tdc->
lock); (&tdc->lock)->pid_writer=0; } while (0)
;
3242 afs_PutDCache(tdc);
3243 tdc = NULL((void *)0);
3244 }
3245 }
3246 if (!tdc) {
3247 afs_MaybeWakeupTruncateDaemon();
3248 while (afs_blocksUsed >
3249 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)((afs_cacheBlocks & 0xffe00000) ? ((afs_cacheBlocks) / 100
* (98)) : ((98) * (afs_cacheBlocks) / 100))
) {
3250 ReleaseWriteLock(&avc->lock)do { ; (&avc->lock)->excl_locked &= ~2; if ((&
avc->lock)->wait_states) Afs_Lock_ReleaseR(&avc->
lock); (&avc->lock)->pid_writer=0; } while (0)
;
3251 if (afs_blocksUsed - afs_blocksDiscarded >
3252 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)((afs_cacheBlocks & 0xffe00000) ? ((afs_cacheBlocks) / 100
* (98)) : ((98) * (afs_cacheBlocks) / 100))
) {
3253 afs_WaitForCacheDrain = 1;
3254 afs_osi_Sleep(&afs_WaitForCacheDrain);
3255 }
3256 afs_MaybeFreeDiscardedDCache();
3257 afs_MaybeWakeupTruncateDaemon();
3258 ObtainWriteLock(&avc->lock, 509)do { ; if (!(&avc->lock)->excl_locked && !(
&avc->lock)->readers_reading) (&avc->lock) ->
excl_locked = 2; else Afs_Lock_Obtain(&avc->lock, 2);
(&avc->lock)->pid_writer = (((__curthread())->td_proc
)->p_pid ); (&avc->lock)->src_indicator = 509; }
while (0)
;
3259 }
3260 avc->f.states |= CDirty0x00000020;
3261 tdc = afs_GetDCache(avc, filePos, areq, &offset, &len, 4);
3262 if (tdc)
3263 ObtainWriteLock(&tdc->lock, 659)do { ; if (!(&tdc->lock)->excl_locked && !(
&tdc->lock)->readers_reading) (&tdc->lock) ->
excl_locked = 2; else Afs_Lock_Obtain(&tdc->lock, 2);
(&tdc->lock)->pid_writer = (((__curthread())->td_proc
)->p_pid ); (&tdc->lock)->src_indicator = 659; }
while (0)
;
3264 }
3265 } else {
3266 tdc = afs_GetDCache(avc, filePos, areq, &offset, &len, 4);
3267 if (tdc)
3268 ObtainWriteLock(&tdc->lock, 660)do { ; if (!(&tdc->lock)->excl_locked && !(
&tdc->lock)->readers_reading) (&tdc->lock) ->
excl_locked = 2; else Afs_Lock_Obtain(&tdc->lock, 2);
(&tdc->lock)->pid_writer = (((__curthread())->td_proc
)->p_pid ); (&tdc->lock)->src_indicator = 660; }
while (0)
;
3269 }
3270 if (tdc) {
3271 if (!(afs_indexFlags[tdc->index] & IFDataMod4)) {
3272 afs_stats_cmperf.cacheCurrDirtyChunks++;
3273 afs_indexFlags[tdc->index] |= IFDataMod4; /* so it doesn't disappear */
3274 }
3275 if (!(tdc->f.states & DWriting8)) {
3276 /* don't mark entry as mod if we don't have to */
3277 tdc->f.states |= DWriting8;
3278 tdc->dflags |= DFEntryMod0x02;
3279 }
3280 }
3281 return tdc;
3282}
3283
3284/*!
3285 * Make a shadow copy of a dir's dcache. It's used for disconnected
3286 * operations like remove/create/rename to keep the original directory data.
3287 * On reconnection, we can diff the original data with the server and get the
3288 * server changes and with the local data to get the local changes.
3289 *
3290 * \param avc The dir vnode.
3291 * \param adc The dir dcache.
3292 *
3293 * \return 0 for success.
3294 *
3295 * \note The vcache entry must be write locked.
3296 * \note The dcache entry must be read locked.
3297 */
3298int
3299afs_MakeShadowDir(struct vcache *avc, struct dcache *adc)
3300{
3301 int i, code, ret_code = 0, written, trans_size;
3302 struct dcache *new_dc = NULL((void *)0);
3303 struct osi_file *tfile_src, *tfile_dst;
3304 struct VenusFid shadow_fid;
3305 char *data;
3306
3307 /* Is this a dir? */
3308 if (vType(avc)((avc)->v)->v_type != VDIR)
3309 return ENOTDIR20;
3310
3311 if (avc->f.shadow.vnode || avc->f.shadow.unique)
3312 return EEXIST17;
3313
3314 /* Generate a fid for the shadow dir. */
3315 shadow_fid.Cell = avc->f.fid.Cell;
3316 shadow_fid.Fid.Volume = avc->f.fid.Fid.Volume;
3317 afs_GenShadowFid(&shadow_fid);
3318
3319 ObtainWriteLock(&afs_xdcache, 716)do { ; if (!(&afs_xdcache)->excl_locked && !(&
afs_xdcache)->readers_reading) (&afs_xdcache) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xdcache, 2); (&afs_xdcache
)->pid_writer = (((__curthread())->td_proc)->p_pid )
; (&afs_xdcache)->src_indicator = 716; } while (0)
;
3320
3321 /* Get a fresh dcache. */
3322 new_dc = afs_AllocDCache(avc, 0, 0, &shadow_fid);
3323
3324 ObtainReadLock(&adc->mflock)do { ; if (!((&adc->mflock)->excl_locked & 2)) (
(&adc->mflock)->readers_reading)++; else Afs_Lock_Obtain
(&adc->mflock, 1); (&adc->mflock)->pid_last_reader
= (((__curthread())->td_proc)->p_pid ); } while (0)
;
3325
3326 /* Set up the new fid. */
3327 /* Copy interesting data from original dir dcache. */
3328 new_dc->mflags = adc->mflags;
3329 new_dc->dflags = adc->dflags;
3330 new_dc->f.modTime = adc->f.modTime;
3331 new_dc->f.versionNo = adc->f.versionNo;
3332 new_dc->f.states = adc->f.states;
3333 new_dc->f.chunk= adc->f.chunk;
3334 new_dc->f.chunkBytes = adc->f.chunkBytes;
3335
3336 ReleaseReadLock(&adc->mflock)do { ; if (!(--((&adc->mflock)->readers_reading)) &&
(&adc->mflock)->wait_states) Afs_Lock_ReleaseW(&
adc->mflock) ; if ( (&adc->mflock)->pid_last_reader
== (((__curthread())->td_proc)->p_pid ) ) (&adc->
mflock)->pid_last_reader =0; } while (0)
;
3337
3338 /* Now add to the two hash chains */
3339 i = DCHash(&shadow_fid, 0)((((&shadow_fid)->Fid.Vnode + (&shadow_fid)->Fid
.Volume + (0))) & (afs_dhashsize-1))
;
3340 afs_dcnextTbl[new_dc->index] = afs_dchashTbl[i];
3341 afs_dchashTbl[i] = new_dc->index;
3342
3343 i = DVHash(&shadow_fid)((((&shadow_fid)->Fid.Vnode + (&shadow_fid)->Fid
.Volume )) & (afs_dhashsize-1))
;
3344 afs_dvnextTbl[new_dc->index] = afs_dvhashTbl[i];
3345 afs_dvhashTbl[i] = new_dc->index;
3346
3347 ReleaseWriteLock(&afs_xdcache)do { ; (&afs_xdcache)->excl_locked &= ~2; if ((&
afs_xdcache)->wait_states) Afs_Lock_ReleaseR(&afs_xdcache
); (&afs_xdcache)->pid_writer=0; } while (0)
;
3348
3349 /* Alloc a 4k block. */
3350 data = afs_osi_Alloc(4096);
3351 if (!data) {
3352 afs_warn("afs_MakeShadowDir: could not alloc data\n");
3353 ret_code = ENOMEM12;
3354 goto done;
3355 }
3356
3357 /* Open the files. */
3358 tfile_src = afs_CFileOpen(&adc->f.inode)(void *)(*(afs_cacheType->open))(&adc->f.inode);
3359 tfile_dst = afs_CFileOpen(&new_dc->f.inode)(void *)(*(afs_cacheType->open))(&new_dc->f.inode);
3360
3361 /* And now copy dir dcache data into this dcache,
3362 * 4k at a time.
3363 */
3364 written = 0;
3365 while (written < adc->f.chunkBytes) {
3366 trans_size = adc->f.chunkBytes - written;
3367 if (trans_size > 4096)
3368 trans_size = 4096;
3369
3370 /* Read a chunk from the dcache. */
3371 code = afs_CFileRead(tfile_src, written, data, trans_size)(*(afs_cacheType->fread))(tfile_src, written, data, trans_size
)
;
3372 if (code < trans_size) {
3373 ret_code = EIO5;
3374 break;
3375 }
3376
3377 /* Write it to the new dcache. */
3378 code = afs_CFileWrite(tfile_dst, written, data, trans_size)(*(afs_cacheType->fwrite))(tfile_dst, written, data, trans_size
)
;
3379 if (code < trans_size) {
3380 ret_code = EIO5;
3381 break;
3382 }
3383
3384 written+=trans_size;
3385 }
3386
3387 afs_CFileClose(tfile_dst)(*(afs_cacheType->close))(tfile_dst);
3388 afs_CFileClose(tfile_src)(*(afs_cacheType->close))(tfile_src);
3389
3390 afs_osi_Free(data, 4096);
3391
3392 ReleaseWriteLock(&new_dc->lock)do { ; (&new_dc->lock)->excl_locked &= ~2; if (
(&new_dc->lock)->wait_states) Afs_Lock_ReleaseR(&
new_dc->lock); (&new_dc->lock)->pid_writer=0; } while
(0)
;
3393 afs_PutDCache(new_dc);
3394
3395 if (!ret_code) {
3396 ObtainWriteLock(&afs_xvcache, 763)do { ; if (!(&afs_xvcache)->excl_locked && !(&
afs_xvcache)->readers_reading) (&afs_xvcache) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xvcache, 2); (&afs_xvcache
)->pid_writer = (((__curthread())->td_proc)->p_pid )
; (&afs_xvcache)->src_indicator = 763; } while (0)
;
3397 ObtainWriteLock(&afs_disconDirtyLock, 765)do { ; if (!(&afs_disconDirtyLock)->excl_locked &&
!(&afs_disconDirtyLock)->readers_reading) (&afs_disconDirtyLock
) -> excl_locked = 2; else Afs_Lock_Obtain(&afs_disconDirtyLock
, 2); (&afs_disconDirtyLock)->pid_writer = (((__curthread
())->td_proc)->p_pid ); (&afs_disconDirtyLock)->
src_indicator = 765; } while (0)
;
3398 QAdd(&afs_disconShadow, &avc->shadowq)((&avc->shadowq)->next = (&afs_disconShadow)->
next, (&avc->shadowq)->prev = (&afs_disconShadow
), (&afs_disconShadow)->next->prev = (&avc->
shadowq), (&afs_disconShadow)->next = (&avc->shadowq
))
;
3399 osi_Assert((afs_RefVCache(avc) == 0))(void)(((afs_RefVCache(avc) == 0)) || (osi_AssertFailK( "(afs_RefVCache(avc) == 0)"
, "/home/wollman/openafs/src/afs/afs_dcache.c", 3399), 0))
;
3400 ReleaseWriteLock(&afs_disconDirtyLock)do { ; (&afs_disconDirtyLock)->excl_locked &= ~2; if
((&afs_disconDirtyLock)->wait_states) Afs_Lock_ReleaseR
(&afs_disconDirtyLock); (&afs_disconDirtyLock)->pid_writer
=0; } while (0)
;
3401 ReleaseWriteLock(&afs_xvcache)do { ; (&afs_xvcache)->excl_locked &= ~2; if ((&
afs_xvcache)->wait_states) Afs_Lock_ReleaseR(&afs_xvcache
); (&afs_xvcache)->pid_writer=0; } while (0)
;
3402
3403 avc->f.shadow.vnode = shadow_fid.Fid.Vnode;
3404 avc->f.shadow.unique = shadow_fid.Fid.Unique;
3405 }
3406
3407done:
3408 return ret_code;
3409}
3410
3411/*!
3412 * Delete the dcaches of a shadow dir.
3413 *
3414 * \param avc The vcache containing the shadow fid.
3415 *
3416 * \note avc must be write locked.
3417 */
3418void
3419afs_DeleteShadowDir(struct vcache *avc)
3420{
3421 struct dcache *tdc;
3422 struct VenusFid shadow_fid;
3423
3424 shadow_fid.Cell = avc->f.fid.Cell;
3425 shadow_fid.Fid.Volume = avc->f.fid.Fid.Volume;
3426 shadow_fid.Fid.Vnode = avc->f.shadow.vnode;
3427 shadow_fid.Fid.Unique = avc->f.shadow.unique;
3428
3429 tdc = afs_FindDCacheByFid(&shadow_fid);
3430 if (tdc) {
3431 afs_HashOutDCache(tdc, 1);
3432 afs_DiscardDCache(tdc);
3433 afs_PutDCache(tdc);
3434 }
3435 avc->f.shadow.vnode = avc->f.shadow.unique = 0;
3436 ObtainWriteLock(&afs_disconDirtyLock, 708)do { ; if (!(&afs_disconDirtyLock)->excl_locked &&
!(&afs_disconDirtyLock)->readers_reading) (&afs_disconDirtyLock
) -> excl_locked = 2; else Afs_Lock_Obtain(&afs_disconDirtyLock
, 2); (&afs_disconDirtyLock)->pid_writer = (((__curthread
())->td_proc)->p_pid ); (&afs_disconDirtyLock)->
src_indicator = 708; } while (0)
;
3437 QRemove(&avc->shadowq)((&avc->shadowq)->next->prev = (&avc->shadowq
)->prev, (&avc->shadowq)->prev->next = (&
avc->shadowq)->next, (&avc->shadowq)->prev = (
(void *)0), (&avc->shadowq)->next = ((void *)0))
;
3438 ReleaseWriteLock(&afs_disconDirtyLock)do { ; (&afs_disconDirtyLock)->excl_locked &= ~2; if
((&afs_disconDirtyLock)->wait_states) Afs_Lock_ReleaseR
(&afs_disconDirtyLock); (&afs_disconDirtyLock)->pid_writer
=0; } while (0)
;
3439 afs_PutVCache(avc); /* Because we held it when we added to the queue */
3440}
3441
 3442/*!
 3443 * Populate a dcache with empty chunks up to a given file size,
 3444 * used before extending a file in order to avoid 'holes' which
 3445 * we can't access in disconnected mode.
 3446 *
 3447 * \param avc The vcache which is being extended (locked)
 3448 * \param apos The new length of the file
 3449 * \param areq The request tracking this operation
 3450 *
 3451 */
3451void
3452afs_PopulateDCache(struct vcache *avc, afs_size_t apos, struct vrequest *areq)
3453{
3454 struct dcache *tdc;
3455 afs_size_t len, offset;
3456 afs_int32 start, end;
3457
3458 /* We're doing this to deal with the situation where we extend
3459 * by writing after lseek()ing past the end of the file . If that
3460 * extension skips chunks, then those chunks won't be created, and
3461 * GetDCache will assume that they have to be fetched from the server.
3462 * So, for each chunk between the current file position, and the new
3463 * length we GetDCache for that chunk.
3464 */
3465
3466 if (AFS_CHUNK(apos)((apos < afs_FirstCSize) ? 0 : (((apos - afs_FirstCSize) >>
afs_LogChunk) + 1))
== 0 || apos <= avc->f.m.Length)
3467 return;
3468
3469 if (avc->f.m.Length == 0)
3470 start = 0;
3471 else
3472 start = AFS_CHUNK(avc->f.m.Length)((avc->f.m.Length < afs_FirstCSize) ? 0 : (((avc->f.
m.Length - afs_FirstCSize) >> afs_LogChunk) + 1))
+1;
3473
3474 end = AFS_CHUNK(apos)((apos < afs_FirstCSize) ? 0 : (((apos - afs_FirstCSize) >>
afs_LogChunk) + 1))
;
3475
3476 while (start<end) {
3477 len = AFS_CHUNKTOSIZE(start)((start == 0) ? afs_FirstCSize : afs_OtherCSize);
3478 tdc = afs_GetDCache(avc, AFS_CHUNKTOBASE(start)((start == 0) ? 0 : ((afs_size_t) afs_FirstCSize + ((afs_size_t
) (start - 1) << afs_LogChunk)))
, areq, &offset, &len, 4);
3479 if (tdc)
3480 afs_PutDCache(tdc);
3481 start++;
3482 }
3483}