#include <CacheImpl.h>
Inheritance diagram for CacheImpl< PageT, VictimPolicyT >

Public Member Functions

    CacheImpl (CacheParams const &, CacheAllocator *=NULL)
    virtual void setAllocatedPageCount (uint nMemPages)
        Resizes this cache.
    virtual uint getAllocatedPageCount ()
        Gets a count of how many pages currently have allocated buffers.
    virtual uint getMaxAllocatedPageCount ()
    virtual PageT * lockPage (BlockId blockId, LockMode lockMode, bool readIfUnmapped, MappedPageListener *pMappedPageListener, TxnId txnId)
        Locks a page into memory with the specified concurrency mode.
    virtual PageT & lockScratchPage (BlockNum blockNum)
        Allocates a free page buffer for scratch usage.
    virtual void discardPage (BlockId blockId)
        Unmaps a page from the cache if already mapped, discarding its contents if dirty.
    virtual uint checkpointPages (PagePredicate &pagePredicate, CheckpointType checkpointType)
        Flushes and/or unmaps selected pages.
    virtual void collectStats (CacheStats &stats)
        Gets a snapshot of cache activity; as a side-effect, clears cumulative performance counters.
    virtual void registerDevice (DeviceId deviceId, SharedRandomAccessDevice pDevice)
        Registers the given device with the Cache; must be called exactly once before any other caching operations can be requested for pages of this device.
    virtual void unregisterDevice (DeviceId deviceId)
        Unregisters the given device from the Cache, asserting that no pages remain mapped to the specified device.
    virtual SharedRandomAccessDevice & getDevice (DeviceId deviceId)
        Dereferences a device ID to the registered object which represents it.
    virtual bool prefetchPage (BlockId blockId, MappedPageListener *pMappedPageListener)
        Hints that a page should be prefetched in preparation for a future lock request.
    virtual void prefetchBatch (BlockId blockId, uint nPages, MappedPageListener *pMappedPageListener)
        Hints that a contiguous run of pages should be prefetched.
    virtual void flushPage (CachePage &page, bool async)
        Forces the contents of a dirty page to its mapped location.
    virtual void unlockPage (CachePage &page, LockMode lockMode, TxnId txnId)
        Releases lock held on page.
    virtual void nicePage (CachePage &page)
        Marks a page as nice, indicating that it is very unlikely the page's mapping will be needed again any time soon, so it is a good candidate for victimization.
    virtual bool isPageMapped (BlockId blockId)
        Determines if a particular page is mapped.
    virtual CacheAllocator & getAllocator () const
    virtual void getPrefetchParams (uint &prefetchPagesMax, uint &prefetchThrottleRate)
        Retrieves the current pre-fetch caching parameters that determine how many pages should be pre-fetched and how often the pre-fetches should occur.
    virtual DeviceAccessScheduler & getDeviceAccessScheduler (RandomAccessDevice &)
        Gets the correct access scheduler for a given device.
    uint getPageSize () const
    virtual SharedCache getCache ()
    virtual uint getMaxLockedPages ()
    virtual void setMaxLockedPages (uint nPages)
        Sets the page lock quota on this accessor.
    virtual void setTxnId (TxnId txnId)
        Sets a default TxnId to use for locking pages (to be used when IMPLICIT_TXN_ID is specified).
    virtual TxnId getTxnId () const
    virtual void writeStats (StatsTarget &target)
        Writes a current stats snapshot to a StatsTarget.
    bool isClosed () const
    void close ()
        Closes this object, releasing any unallocated resources.
Static Public Member Functions

    static SharedCache newCache (CacheParams const &cacheParams, CacheAllocator *bufferAllocator=NULL)
        Factory method.

Static Public Attributes

    static const DeviceId NULL_DEVICE_ID
        The DeviceId assigned to the instance of RandomAccessNullDevice associated with every cache.
Protected Member Functions

    void closeImpl ()
        Must be implemented by derived class to release any resources.

Protected Attributes

    uint cbPage
    bool needsClose

Private Types

    enum FlushPhase { phaseSkip, phaseInitiate, phaseWait }
        Flush state used inside of checkpointPages.
    typedef PageBucket< PageT > PageBucketT
    typedef PageBucketT::PageListIter PageBucketIter
    typedef PageBucketT::PageListMutator PageBucketMutator
    typedef VictimPolicyT::PageIterator VictimPageIterator
    typedef VictimPolicyT::DirtyPageIterator DirtyVictimPageIterator
    typedef VictimPolicyT::SharedGuard VictimSharedGuard
    typedef VictimPolicyT::ExclusiveGuard VictimExclusiveGuard
Private Member Functions

    PageT * lookupPage (PageBucketT &bucket, BlockId blockId, bool pin)
        Finds a page by BlockId within a particular bucket.
    PageT * findFreePage ()
        Obtains a free page (either from the free queue or by victimizing a mapped page); if none is available, suspends for a little while to help reduce cache load.
    void flushSomePages ()
        Initiates asynchronous writes for a few dirty pages which are the best victimization candidates.
    bool transferPageAsync (PageT &page)
        Performs an asynchronous I/O operation on the given page.
    bool readPageAsync (PageT &page)
        Reads the given page asynchronously.
    bool writePageAsync (PageT &page)
        Writes the given page asynchronously.
    FileSize getPageOffset (BlockId const &blockId)
        Translates a BlockId into the byte offset of the corresponding device block.
    PageBucketT & getHashBucket (BlockId const &blockId)
        Gets the hash bucket containing the given BlockId.
    void assertCorrectBucket (PageBucketT &bucket, BlockId const &blockId)
        Verifies the match between a PageBucket and BlockId.
    void unmapPage (PageT &page, StrictMutexGuard &guard, bool discard)
        Unmaps a currently mapped page, but does not add it to the free list.
    void unmapAndFreeDiscardedPage (PageT &page, StrictMutexGuard &guard)
        Unmaps a page being discarded and adds it to unmappedBucket.
    PageT & mapPage (PageBucketT &bucket, PageT &newPage, BlockId blockId, MappedPageListener *pMappedPageListener, bool bPendingRead=true, bool bIncRef=true)
        Maps a page if it is not already mapped (and notifies victimPolicy of the page mapping); otherwise, finds the existing mapping (and notifies victimPolicy of the page access).
    void freePage (PageT &page)
        Places an unmapped page in unmappedBucket, making it available for the next call to findFreePage.
    bool canVictimizePage (PageT &page)
        Decides whether a page can be victimized.
    void incrementCounter (AtomicCounter &x)
        Increments a counter variable safely.
    void decrementCounter (AtomicCounter &x)
        Decrements a counter variable safely.
    void incrementStatsCounter (AtomicCounter &x)
        Increments a statistical counter.
    void decrementStatsCounter (AtomicCounter &x)
        Decrements a statistical counter.
    void initializeStats ()
        Clears stats which are tracked since initialization.
    void allocatePages (CacheParams const &params)
        Handles initial allocation of pages and attempts to handle any associated out-of-memory errors.
    void successfulPrefetch ()
        Updates counters indicating a successful pre-fetch has occurred.
    void rejectedPrefetch ()
        Updates counters indicating a pre-fetch was rejected.
    void ioRetry ()
        Updates counters indicating I/O retry was required.
    void calcDirtyThreshholds (uint nCachePages)
        Calculates the number of dirty pages corresponding to the high and low water marks.
    void markPageDirty (CachePage &page)
    void notifyTransferCompletion (CachePage &, bool)
    virtual uint getTimerIntervalMillis ()
        Calculates the interval which should elapse before the next call to onTimerInterval.
    virtual void onTimerInterval ()
        Receives notification from TimerThread that interval has elapsed.
    virtual void onThreadStart ()
        Called in new thread context before thread's body runs.
    virtual void onThreadEnd ()
        Called in thread context after thread's body runs.
    virtual FennelExcn * cloneExcn (std::exception &ex)
        Clones an exception so that it can be rethrown in a different thread context.
Private Attributes

    std::vector< SharedRandomAccessDevice > deviceTable
        Collection of registered devices indexed by DeviceId; this array is of fixed size, with a NULL slot indicating that the given device ID has not been registered; this permits synchronization-free access to the collection.
    PageBucketT unmappedBucket
        Bucket of free unmapped pages whose buffers are still allocated.
    PageBucketT unallocatedBucket
        Bucket of free pages whose buffers are not allocated.
    std::vector< PageBucketT * > pageTable
        Array of PageBuckets indexed by BlockId hash code.
    uint dirtyHighWaterPercent
        Percentage of pages in the cache that must be dirty for lazy writes to be initiated.
    uint dirtyLowWaterPercent
        Percentage of pages in the cache that must be dirty for lazy writes to be suspended.
    uint dirtyHighWaterMark
        Number of dirty pages in the cache corresponding to the high-water percentage.
    uint dirtyLowWaterMark
        Number of dirty pages in the cache corresponding to the low-water percentage.
    bool inFlushMode
        Used by the lazy writer thread to indicate that the high-water dirty threshold has been reached and page flushes should continue until the low-water threshold is reached.
    AtomicCounter nCacheHits
        See CacheStats::nHits.
    AtomicCounter nCacheRequests
        See CacheStats::nRequests.
    AtomicCounter nVictimizations
        See CacheStats::nVictimizations.
    AtomicCounter nDirtyPages
        See CacheStats::nDirtyPages.
    AtomicCounter nPageReads
        See CacheStats::nPageReads.
    AtomicCounter nPageWrites
        See CacheStats::nPageWrites.
    AtomicCounter nRejectedCachePrefetches
        See CacheStats::nRejectedPrefetches.
    AtomicCounter nIoRetries
        See CacheStats::nIoRetries.
    AtomicCounter nSuccessfulCachePrefetches
        See CacheStats::nSuccessfulPrefetches.
    AtomicCounter nLazyWrites
        See CacheStats::nLazyWrites.
    AtomicCounter nLazyWriteCalls
        See CacheStats::nLazyWriteCalls.
    AtomicCounter nVictimizationWrites
        See CacheStats::nVictimizationWrites.
    AtomicCounter nCheckpointWrites
        See CacheStats::nCheckpointWrites.
    CacheStats statsSinceInit
        Accumulated state for all counters which are tracked since cache initialization.
    StrictMutex freePageMutex
        Mutex coupled with freePageCondition.
    LocalCondition freePageCondition
        Condition variable used for notification of free page availability.
    std::vector< PageT * > pages
        A fixed-size vector of pointers to cache pages; we can get away with this because currently the number of pages is fixed at initialization.
    DeviceAccessScheduler * pDeviceAccessScheduler
        Scheduler for asynchronous I/O.
    CacheAllocator & bufferAllocator
        Source of buffer memory.
    boost::scoped_ptr< CacheAllocator > pBufferAllocator
        Set only if bufferAllocator is owned by this cache.
    VictimPolicyT victimPolicy
        The realization for the VictimPolicy model.
    TimerThread timerThread
        Thread for running idle flush.
    uint idleFlushInterval
    uint prefetchPagesMax
        Maximum number of outstanding pre-fetch requests.
    uint prefetchThrottleRate
        The number of successful pre-fetches that must occur before the pre-fetch rate is throttled back up, after it has been throttled down due to rejected requests.
CacheImpl is a template implementation of the Cache interface. It collaborates with the types of its template parameters (PageT, VictimPolicyT). The realization of PageT must always be derived from Page; it may have other inheritance requirements depending on the realization chosen for VictimPolicyT.
Synchronization within the cache accomplishes three distinct goals and occurs at several different levels; to avoid deadlock, lock acquisition must follow a strict ordering.
Note that some of the code in CacheImpl (e.g. the methods lookupPage or markPageDirty) could be moved to other classes such as PageBucket or CachePage under standard object-oriented design. However, this is deliberately not done, because the cache synchronization logic is complex and every effort should be made to keep it easy to understand, verify, and debug. Keeping method implementations as units rather than distributing them over a number of objects helps with this.
CacheImpl implements TimerThreadClient for taking repeated action on timer callbacks (e.g. idle flush).
Definition at line 115 of file CacheImpl.h.
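A minimal usage sketch follows, under assumed names: the device-opening helper openSomeDevice and the chosen IDs are hypothetical placeholders, while newCache, registerDevice, lockPage, unlockPage, discardPage, unregisterDevice, and close are the entry points documented on this page.

    // Hedged usage sketch; identifiers marked below are assumptions.
    CacheParams params;                            // default configuration
    SharedCache pCache = Cache::newCache(params);  // factory method (assumed callable via the Cache base)

    DeviceId deviceId(1);                                 // placeholder device ID
    SharedRandomAccessDevice pDevice = openSomeDevice();  // hypothetical helper returning a RandomAccessDevice
    pCache->registerDevice(deviceId, pDevice);

    BlockId blockId = NULL_BLOCK_ID;
    CompoundId::setDeviceId(blockId, deviceId);
    CompoundId::setBlockNum(blockId, 0);

    // lock shared, reading the page from the device if not already mapped
    CachePage *pPage = pCache->lockPage(
        blockId, LOCKMODE_S, true, NULL, IMPLICIT_TXN_ID);
    if (pPage) {
        // ... inspect pPage->getReadableData() ...
        pCache->unlockPage(*pPage, LOCKMODE_S, IMPLICIT_TXN_ID);
    }

    pCache->discardPage(blockId);        // drop the mapping before unregistering
    pCache->unregisterDevice(deviceId);  // asserts no pages remain mapped
    pCache->close();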
typedef PageBucket<PageT> CacheImpl< PageT, VictimPolicyT >::PageBucketT [private]

Definition at line 118 of file CacheImpl.h.

typedef PageBucketT::PageListIter CacheImpl< PageT, VictimPolicyT >::PageBucketIter [private]

Definition at line 119 of file CacheImpl.h.

typedef PageBucketT::PageListMutator CacheImpl< PageT, VictimPolicyT >::PageBucketMutator [private]

Definition at line 120 of file CacheImpl.h.

typedef VictimPolicyT::PageIterator CacheImpl< PageT, VictimPolicyT >::VictimPageIterator [private]

Definition at line 322 of file CacheImpl.h.

typedef VictimPolicyT::DirtyPageIterator CacheImpl< PageT, VictimPolicyT >::DirtyVictimPageIterator [private]

Definition at line 323 of file CacheImpl.h.

typedef VictimPolicyT::SharedGuard CacheImpl< PageT, VictimPolicyT >::VictimSharedGuard [private]

Definition at line 324 of file CacheImpl.h.

typedef VictimPolicyT::ExclusiveGuard CacheImpl< PageT, VictimPolicyT >::VictimExclusiveGuard [private]

Definition at line 325 of file CacheImpl.h.
enum CacheImpl::FlushPhase [private]

Flush state used inside of checkpointPages.

Definition at line 314 of file CacheImpl.h.

{
    phaseSkip, phaseInitiate, phaseWait
};
CacheImpl< PageT, VictimPolicyT >::CacheImpl (CacheParams const &, CacheAllocator * = NULL)

Definition at line 46 of file CacheMethodsImpl.h.

References CacheParams::cbPage, CacheParams::idleFlushInterval, DeviceAccessScheduler::newScheduler(), CacheParams::prefetchPagesMax, CacheParams::prefetchThrottleRate, and CacheParams::schedParams.

:
    deviceTable(CompoundId::getMaxDeviceCount()),
    pageTable(),
    bufferAllocator(
        pBufferAllocatorInit ?
        *pBufferAllocatorInit
        : *new VMAllocator(params.cbPage,0)),
    pBufferAllocator(pBufferAllocatorInit ? NULL : &bufferAllocator),
    victimPolicy(params),
    timerThread(*this)
{
    cbPage = params.cbPage;
    pDeviceAccessScheduler = NULL;
    inFlushMode = false;

    // TODO - parameterize
    dirtyHighWaterPercent = 25;
    dirtyLowWaterPercent = 5;

    initializeStats();

    allocatePages(params);

    // initialize page hash table
    // NOTE: this is the size of the page hash table; 2N is for a 50%
    // load factor, and +1 is to avoid picking an even number
    // TODO: use a static table of primes to pick the least-upper-bound prime
    pageTable.resize(2*pages.size()+1);
    for (uint i = 0; i < pageTable.size(); i++) {
        pageTable[i] = new PageBucketT();
    }

    try {
        pDeviceAccessScheduler = DeviceAccessScheduler::newScheduler(
            params.schedParams);
    } catch (FennelExcn &ex) {
        close();
        throw ex;
    }

    // initialize null device
    registerDevice(
        NULL_DEVICE_ID,
        SharedRandomAccessDevice(
            new RandomAccessNullDevice()));

    idleFlushInterval = params.idleFlushInterval;
    if (idleFlushInterval) {
        timerThread.start();
    }

    prefetchPagesMax = params.prefetchPagesMax;
    prefetchThrottleRate = params.prefetchThrottleRate;
}
PageT * CacheImpl< PageT, VictimPolicyT >::lookupPage (PageBucketT &bucket, BlockId blockId, bool pin) [private]

Finds a page by BlockId within a particular bucket.

If found, waits for any pending read and then increments the page reference count; also notifies victimPolicy of the page access.

Parameters:
    bucket     the bucket to search; must be the same as getHashBucket(blockId)
    blockId    the BlockId of the page to look for
    pin        if true, the page will be pinned in the cache

Definition at line 997 of file CacheMethodsImpl.h.

References CachePage::DATA_READ, PageBucket< PageT >::mutex, and PageBucket< PageT >::pageList.

{
    assertCorrectBucket(bucket,blockId);
    SXMutexSharedGuard bucketGuard(bucket.mutex);
    for (PageBucketIter iter(bucket.pageList); iter; ++iter) {
        StrictMutexGuard pageGuard(iter->mutex);
        if (iter->getBlockId() == blockId) {
            victimPolicy.notifyPageAccess(*iter, pin);
            iter->nReferences++;
            while (iter->dataStatus == CachePage::DATA_READ) {
                iter->waitForPendingIO(pageGuard);
            }
            return iter;
        }
    }
    return NULL;
}
PageT * CacheImpl< PageT, VictimPolicyT >::findFreePage () [private]

Obtains a free page (either from the free queue or by victimizing a mapped page); if none is available, suspends for a little while to help reduce cache load.

The returned page is clean, unmapped, and ready to be remapped.

Definition at line 1017 of file CacheMethodsImpl.h.

References convertTimeout(), and IntrusiveListMutator< T, DerivedListNode >::detach().

Referenced by CacheImpl< PageT, VictimPolicyT >::setAllocatedPageCount().

{
    // Check unmappedBucket first. Note the use of the double-checked locking
    // idiom here; it's OK because perfect accuracy is not required. Under
    // steady-state conditions, unmappedBucket will be empty, so avoiding
    // unnecessary locking is a worthwhile optimization.
    if (unmappedBucket.pageList.size()) {
        SXMutexExclusiveGuard unmappedBucketGuard(unmappedBucket.mutex);
        PageBucketMutator mutator(unmappedBucket.pageList);
        if (mutator) {
            assert(!mutator->hasBlockId());
            return mutator.detach();
        }
    }
    // search for a victimizable page, trying pages in the order recommended
    // by victimPolicy
    uint nToFlush = 10;

    VictimSharedGuard victimSharedGuard(victimPolicy.getMutex());
    std::pair<VictimPageIterator,VictimPageIterator> victimRange(
        victimPolicy.getVictimRange());
    for (; victimRange.first != victimRange.second; ++(victimRange.first)) {
        PageT &page = *(victimRange.first);
        // if page mutex is unavailable, just skip it
        StrictMutexGuard pageGuard(page.mutex, boost::try_to_lock);
        if (!pageGuard.owns_lock()) {
            continue;
        }
        if (canVictimizePage(page)) {
            if (page.isDirty()) {
                // can't victimize a dirty page; kick off an async write
                // and maybe later when we come back to try again it will
                // be available
                if (!nToFlush) {
                    continue;
                }
                if (page.pMappedPageListener &&
                    !page.pMappedPageListener->canFlushPage(page))
                {
                    continue;
                }
                nToFlush--;
                incrementStatsCounter(nVictimizationWrites);
                // If the write request required retry, don't submit any
                // additional write requests in this loop
                if (!writePageAsync(page)) {
                    nToFlush = 0;
                }
                continue;
            }
            // NOTE: have to do this early since unmapPage will
            // call back into victimPolicy, which could deadlock
            victimSharedGuard.unlock();
            unmapPage(page,pageGuard,false);
            incrementStatsCounter(nVictimizations);
            return &page;
        }
    }
    victimSharedGuard.unlock();

    // no free pages, so wait for one (with timeout just in case)
    StrictMutexGuard freePageGuard(freePageMutex);
    boost::xtime atv;
    convertTimeout(100,atv);
    freePageCondition.timed_wait(freePageGuard,atv);
    return NULL;
}
void CacheImpl< PageT, VictimPolicyT >::flushSomePages () [private]

Initiates asynchronous writes for a few dirty pages which are the best victimization candidates.

Definition at line 1155 of file CacheMethodsImpl.h.

References LOCKMODE_S, and LOCKMODE_S_NOWAIT.

{
    // TODO: parameterize
    uint nToFlush = std::min<uint>(5,nDirtyPages);
    if (!nToFlush) {
        // in case there aren't any dirty buffers to start with
        return;
    }

    // Only flush if we're within the dirty threshholds
    if (!inFlushMode) {
        if (nDirtyPages < dirtyHighWaterMark) {
            return;
        }
        inFlushMode = true;
    }
    if (nDirtyPages < dirtyLowWaterMark) {
        inFlushMode = false;
        return;
    }

    incrementStatsCounter(nLazyWriteCalls);
    uint nFlushed = 0;
    VictimSharedGuard victimSharedGuard(victimPolicy.getMutex());
    std::pair<DirtyVictimPageIterator,DirtyVictimPageIterator> victimRange(
        victimPolicy.getDirtyVictimRange());
    for (; victimRange.first != victimRange.second; ++(victimRange.first)) {
        PageT &page = *(victimRange.first);
        // if page mutex is unavailable, just skip it
        StrictMutexGuard pageGuard(page.mutex, boost::try_to_lock);
        if (!pageGuard.owns_lock()) {
            continue;
        }
        if (!page.isDirty()) {
            continue;
        }
        if (page.isScratchLocked()) {
            // someone has the page scratch-locked
            continue;
        }
        if (!page.lock.waitFor(LOCKMODE_S_NOWAIT)) {
            // someone has the page write-locked
            continue;
        } else {
            // release our test lock just acquired
            page.lock.release(LOCKMODE_S);
        }
        if (page.pMappedPageListener &&
            !page.pMappedPageListener->canFlushPage(page))
        {
            continue;
        }
        incrementStatsCounter(nLazyWrites);
        // If the write request required retry, don't submit any
        // additional write requests
        if (!writePageAsync(page)) {
            break;
        }
        nFlushed++;
        if (nFlushed >= nToFlush) {
            break;
        }
    }
}
bool CacheImpl< PageT, VictimPolicyT >::transferPageAsync (PageT &page) [private]

Performs an asynchronous I/O operation on the given page.

The page's ID and dataStatus should already be defined.

Parameters:
    page    page to transfer

Definition at line 1316 of file CacheMethodsImpl.h.

References RandomAccessRequest::bindingList, RandomAccessRequest::cbOffset, RandomAccessRequest::cbTransfer, CachePage::DATA_READ, CachePage::DATA_WRITE, CompoundId::getDeviceId(), RandomAccessRequest::pDevice, RandomAccessRequest::READ, RandomAccessRequest::type, and RandomAccessRequest::WRITE.

{
    SharedRandomAccessDevice &pDevice =
        getDevice(CompoundId::getDeviceId(page.getBlockId()));
    RandomAccessRequest request;
    request.pDevice = pDevice.get();
    request.cbOffset = getPageOffset(page.getBlockId());
    request.cbTransfer = getPageSize();
    if (page.dataStatus == CachePage::DATA_WRITE) {
        request.type = RandomAccessRequest::WRITE;
    } else {
        assert(page.dataStatus == CachePage::DATA_READ);
        request.type = RandomAccessRequest::READ;
    }
    request.bindingList.push_back(page);
    bool rc = getDeviceAccessScheduler(*pDevice).schedule(request);
    if (!rc) {
        ioRetry();
    }
    return rc;
}
bool CacheImpl< PageT, VictimPolicyT >::readPageAsync (PageT &page) [inline, private]

Reads the given page asynchronously.

Parameters:
    page    page to read

Definition at line 1347 of file CacheMethodsImpl.h.

References CachePage::DATA_READ.

{
    page.dataStatus = CachePage::DATA_READ;
    incrementStatsCounter(nPageReads);
    return transferPageAsync(page);
}
bool CacheImpl< PageT, VictimPolicyT >::writePageAsync (PageT &page) [inline, private]

Writes the given page asynchronously.

Parameters:
    page    page to write

Definition at line 1356 of file CacheMethodsImpl.h.

References CachePage::DATA_WRITE.

{
    assert(page.isDirty());
    if (page.pMappedPageListener) {
        assert(page.pMappedPageListener->canFlushPage(page));
        page.pMappedPageListener->notifyBeforePageFlush(page);
    }
    page.dataStatus = CachePage::DATA_WRITE;
    incrementStatsCounter(nPageWrites);
    if (!transferPageAsync(page)) {
        return false;
    } else {
        return true;
    }
}
FileSize CacheImpl< PageT, VictimPolicyT >::getPageOffset (BlockId const &blockId) [inline, private]

Translates a BlockId into the byte offset of the corresponding device block.

Parameters:
    blockId    the BlockId to translate

Definition at line 1374 of file CacheMethodsImpl.h.

References CompoundId::getBlockNum().

{
    return ((FileSize) CompoundId::getBlockNum(blockId))
        * (FileSize) cbPage;
}
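For example, with cbPage = 4096 bytes, the page at BlockNum 3 begins at byte offset 3 * 4096 = 12288 on its device.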
PageBucket< PageT > & CacheImpl< PageT, VictimPolicyT >::getHashBucket (BlockId const &blockId) [inline, private]

Gets the hash bucket containing the given BlockId.

Parameters:
    blockId    the BlockId being sought

Definition at line 1382 of file CacheMethodsImpl.h.

{
    std::hash<BlockId> hasher;
    size_t hashCode = hasher(blockId);
    return *(pageTable[hashCode % pageTable.size()]);
}
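For example, a cache created with 1000 pages sizes pageTable at 2*1000+1 = 2001 buckets (see the constructor above), so a BlockId whose hash code is 5000 falls into bucket 5000 % 2001 = 998.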
void CacheImpl< PageT, VictimPolicyT >::assertCorrectBucket (PageBucketT &bucket, BlockId const &blockId) [inline, private]

Verifies the match between a PageBucket and BlockId.

Some methods (e.g. lockPage) precompute the correct bucket for a BlockId parameter and then make calls to helper methods (e.g. lookupPage, mapPage) which can skip the bucket lookup. This assertion allows the helper methods to verify the calling logic.

Definition at line 1391 of file CacheMethodsImpl.h.

{
    assert(&bucket == &(getHashBucket(blockId)));
}
void CacheImpl< PageT, VictimPolicyT >::unmapPage (PageT &page, StrictMutexGuard &guard, bool discard) [private]

Unmaps a currently mapped page, but does not add it to the free list.

Also notifies victimPolicy of the unmapping. The page must have no outstanding references.

Parameters:
    page       a currently mapped page to be unmapped
    guard      a guard on page.mutex, which must already be held by the calling thread when this method is invoked, and which will be released when the method returns
    discard    if true, the page is being discarded from the cache

Definition at line 1222 of file CacheMethodsImpl.h.

References CachePage::DATA_INVALID, PageBucket< PageT >::mutex, NULL_BLOCK_ID, and PageBucket< PageT >::pageList.

{
    assert(!page.nReferences);
    assert(pageGuard.owns_lock());

    victimPolicy.notifyPageUnmap(page, discard);
    if (page.pMappedPageListener) {
        page.pMappedPageListener->notifyPageUnmap(page);
        page.pMappedPageListener = NULL;
    }
    if (page.isDirty()) {
        decrementCounter(nDirtyPages);
    }

    // NOTE: to get the locking sequence safe for deadlock avoidance,
    // we're going to have to release the page mutex. To indicate that the
    // page is being unmapped (so that no one else tries to lock it or
    // victimize it), we first clear the BlockId, saving it for our own use.
    BlockId blockId = page.getBlockId();
    page.blockId = NULL_BLOCK_ID;
    page.dataStatus = CachePage::DATA_INVALID;
    pageGuard.unlock();

    PageBucketT &bucket = getHashBucket(blockId);
    SXMutexExclusiveGuard bucketGuard(bucket.mutex);
    bool bFound = bucket.pageList.remove(page);
    assert(bFound);
}
void CacheImpl< PageT, VictimPolicyT >::unmapAndFreeDiscardedPage (PageT &page, StrictMutexGuard &guard) [private]

Unmaps a page being discarded and adds it to unmappedBucket.

Also notifies victimPolicy of the unmapping. If the page is dirty, it is not flushed. However, any pending I/O is allowed to complete before discard.

Parameters:
    page     a currently mapped page to be unmapped
    guard    a guard on page.mutex, which must already be held by the calling thread when this method is invoked, and which will be released and reacquired during the method's execution

Definition at line 1253 of file CacheMethodsImpl.h.

{
    while (page.isTransferInProgress()) {
        page.waitForPendingIO(pageGuard);
    }
    unmapPage(page,pageGuard,true);
    pageGuard.lock();
    assert(!page.nReferences);
    freePage(page);
}
PageT & CacheImpl< PageT, VictimPolicyT >::mapPage (PageBucketT &bucket, PageT &newPage, BlockId blockId, MappedPageListener *pMappedPageListener, bool bPendingRead = true, bool bIncRef = true) [private]

Maps a page if it is not already mapped (and notifies victimPolicy of the page mapping); otherwise, finds the existing mapping (and notifies victimPolicy of the page access).

Parameters:
    bucket                 the bucket to contain the Page; must be the same as getHashBucket(blockId)
    newPage                a free page to map (previously obtained from findFreePage); if mapPage finds an existing mapping, newPage is automatically freed
    blockId                the BlockId to be mapped
    pMappedPageListener    the MappedPageListener to be associated with the mapped page
    bPendingRead           true if a newly mapped page should be marked for a pending read (this is ignored if the page was already mapped)
    bIncRef                true if the returned page should have its reference count incremented as a side effect

Definition at line 1266 of file CacheMethodsImpl.h.

References CachePage::DATA_READ, CompoundId::getDeviceId(), PageBucket< PageT >::mutex, MappedPageListener::notifyPageMap(), PageBucket< PageT >::pageList, and SXMutexGuard< lockMode >::unlock().

{
    assert(!page.hasBlockId());
    assert(!page.isDirty());
    assert(getDevice(CompoundId::getDeviceId(blockId)).get());
    assertCorrectBucket(bucket,blockId);

    // check existing pages in hash bucket in case someone else just mapped the
    // same page
    SXMutexExclusiveGuard bucketGuard(bucket.mutex);
    for (PageBucketIter iter(bucket.pageList); iter; ++iter) {
        StrictMutexGuard pageGuard(iter->mutex);
        if (iter->getBlockId() == blockId) {
            // blockId already mapped; discard new page and return existing page
            freePage(page);
            if (bIncRef) {
                iter->nReferences++;
            }
            bucketGuard.unlock();
            assert(pMappedPageListener == iter->pMappedPageListener);
            victimPolicy.notifyPageAccess(*iter, bIncRef);
            return *iter;
        }
    }

    // not found: add new page instead
    StrictMutexGuard pageGuard(page.mutex);
    page.blockId = blockId;
    assert(!page.pMappedPageListener);
    page.pMappedPageListener = pMappedPageListener;
    if (bIncRef) {
        page.nReferences++;
    }
    if (bPendingRead) {
        page.dataStatus = CachePage::DATA_READ;
    }
    bucket.pageList.push_back(page);
    bucketGuard.unlock();
    victimPolicy.notifyPageMap(page, bIncRef);
    if (pMappedPageListener) {
        pMappedPageListener->notifyPageMap(page);
    }
    return page;
}
void CacheImpl< PageT, VictimPolicyT >::freePage (PageT &page) [inline, private]

Places an unmapped page in unmappedBucket, making it available for the next call to findFreePage.

Parameters:
    page    the page to be freed

Definition at line 1398 of file CacheMethodsImpl.h.

Referenced by CacheImpl< PageT, VictimPolicyT >::setAllocatedPageCount().

{
    SXMutexExclusiveGuard unmappedBucketGuard(unmappedBucket.mutex);
    unmappedBucket.pageList.push_back(page);
}
bool CacheImpl< PageT, VictimPolicyT >::canVictimizePage (PageT &page) [inline, private]

Decides whether a page can be victimized.

The page must be mapped, have no references, and no pending I/O.

Parameters:
    page    the page to test; the page's mutex must already be held by the calling thread

Definition at line 1406 of file CacheMethodsImpl.h.

{
    // NOTE: the hasBlockId() check is to prevent us from trying to
    // victimize a page that is in transit between the free list and
    // a mapping; maybe such pages should have nReferences
    // non-zero instead?
    return page.hasBlockId()
        && !page.nReferences
        && !page.isTransferInProgress();
}
void CacheImpl< PageT, VictimPolicyT >::incrementCounter (AtomicCounter &x) [inline, private]

Increments a counter variable safely.

Parameters:
    x    reference to counter to be updated

Definition at line 1419 of file CacheMethodsImpl.h.
void CacheImpl< PageT, VictimPolicyT >::decrementCounter (AtomicCounter &x) [inline, private]

Decrements a counter variable safely.

Parameters:
    x    reference to counter to be updated

Definition at line 1426 of file CacheMethodsImpl.h.
void CacheImpl< PageT, VictimPolicyT >::incrementStatsCounter (AtomicCounter &x) [inline, private]

Increments a statistical counter.

Can be defined to NOP to increase cache performance if statistics aren't important.

Parameters:
    x    reference to counter to be updated

Definition at line 1433 of file CacheMethodsImpl.h.

{
    incrementCounter(x);
}
void CacheImpl< PageT, VictimPolicyT >::decrementStatsCounter (AtomicCounter &x) [inline, private]

Decrements a statistical counter.

Can be defined to NOP to increase cache performance if statistics aren't important.

Parameters:
    x    reference to counter to be updated

Definition at line 1440 of file CacheMethodsImpl.h.

{
    decrementCounter(x);
}
void CacheImpl< PageT, VictimPolicyT >::initializeStats () [private]

Clears stats which are tracked since initialization.

Definition at line 114 of file CacheMethodsImpl.h.

References CacheStats::nCheckpointWrites, CacheStats::nCheckpointWritesSinceInit, CacheStats::nDirtyPages, CacheStats::nHits, CacheStats::nHitsSinceInit, CacheStats::nIoRetries, CacheStats::nIoRetriesSinceInit, CacheStats::nLazyWriteCalls, CacheStats::nLazyWriteCallsSinceInit, CacheStats::nLazyWrites, CacheStats::nLazyWritesSinceInit, CacheStats::nMemPagesAllocated, CacheStats::nMemPagesMax, CacheStats::nMemPagesUnused, CacheStats::nPageReads, CacheStats::nPageReadsSinceInit, CacheStats::nPageWrites, CacheStats::nPageWritesSinceInit, CacheStats::nRejectedPrefetches, CacheStats::nRejectedPrefetchesSinceInit, CacheStats::nRequests, CacheStats::nRequestsSinceInit, CacheStats::nSuccessfulPrefetches, CacheStats::nSuccessfulPrefetchesSinceInit, CacheStats::nVictimizations, CacheStats::nVictimizationsSinceInit, CacheStats::nVictimizationWrites, CacheStats::nVictimizationWritesSinceInit, and CacheImpl< PageT, VictimPolicyT >::statsSinceInit.

{
    // clear instantaneous counters too just to avoid confusion
    statsSinceInit.nHits = 0;
    statsSinceInit.nHitsSinceInit = 0;
    statsSinceInit.nRequests = 0;
    statsSinceInit.nRequestsSinceInit = 0;
    statsSinceInit.nVictimizations = 0;
    statsSinceInit.nVictimizationsSinceInit = 0;
    statsSinceInit.nDirtyPages = 0;
    statsSinceInit.nPageReads = 0;
    statsSinceInit.nPageReadsSinceInit = 0;
    statsSinceInit.nPageWrites = 0;
    statsSinceInit.nPageWritesSinceInit = 0;
    statsSinceInit.nRejectedPrefetches = 0;
    statsSinceInit.nRejectedPrefetchesSinceInit = 0;
    statsSinceInit.nIoRetries = 0;
    statsSinceInit.nIoRetriesSinceInit = 0;
    statsSinceInit.nSuccessfulPrefetches = 0;
    statsSinceInit.nSuccessfulPrefetchesSinceInit = 0;
    statsSinceInit.nLazyWrites = 0;
    statsSinceInit.nLazyWritesSinceInit = 0;
    statsSinceInit.nLazyWriteCalls = 0;
    statsSinceInit.nLazyWriteCallsSinceInit = 0;
    statsSinceInit.nVictimizationWrites = 0;
    statsSinceInit.nVictimizationWritesSinceInit = 0;
    statsSinceInit.nCheckpointWrites = 0;
    statsSinceInit.nCheckpointWritesSinceInit = 0;
    statsSinceInit.nMemPagesAllocated = 0;
    statsSinceInit.nMemPagesUnused = 0;
    statsSinceInit.nMemPagesMax = 0;
}
void CacheImpl< PageT, VictimPolicyT >::allocatePages (CacheParams const &params) [private]

Handles initial allocation of pages and attempts to handle any associated out-of-memory errors.

Definition at line 148 of file CacheMethodsImpl.h.

References CacheAllocator::allocate(), CacheImpl< PageT, VictimPolicyT >::bufferAllocator, CacheImpl< PageT, VictimPolicyT >::calcDirtyThreshholds(), ClosableObject::close(), CacheAllocator::deallocate(), CacheParams::defaultMemPagesInit, CacheParams::defaultMemPagesMax, deleteAndNullify(), min(), CacheParams::nMemPagesInit, CacheParams::nMemPagesMax, PageBucket< PageT >::pageList, CacheImpl< PageT, VictimPolicyT >::pages, CacheImpl< PageT, VictimPolicyT >::unallocatedBucket, CacheImpl< PageT, VictimPolicyT >::unmappedBucket, and CacheImpl< PageT, VictimPolicyT >::victimPolicy.

{
    static const int allocErrorMsgSize = 255;
    uint nPagesMax = 0;
    uint nPagesInit = 0;

    // Make two attempts: First, use the configured values. If that fails,
    // try again with default nMemPagesMax. If that fails, throw in the towel.
    for (int attempts = 0; attempts < 2; attempts++) {
        bool allocError = false;
        int allocErrorCode = 0;
        char allocErrorMsg[allocErrorMsgSize + 1] = { 0 };

        nPagesMax = params.nMemPagesMax;
        nPagesInit = params.nMemPagesInit;

        try {
            if (attempts != 0) {
                nPagesMax = CacheParams::defaultMemPagesMax;
                nPagesInit = CacheParams::defaultMemPagesInit;
            }

            pages.clear();
            if (pages.capacity() > nPagesMax) {
                // Reset capacity of pages to a smaller value by swapping pages
                // with a temporary vector that has tiny capacity. (Avoid
                // zero capacity since that causes a memset warning.)
                std::vector<PageT *>(1).swap(pages);
            }
            pages.reserve(nPagesMax);
            pages.assign(nPagesMax, NULL);

            // allocate pages, but defer adding all of them onto the free list
            for (uint i = 0; i < nPagesMax; i++) {
                PBuffer pBuffer = NULL;
                if (i < nPagesInit) {
                    pBuffer = static_cast<PBuffer>(
                        bufferAllocator.allocate(&allocErrorCode));
                    if (pBuffer == NULL) {
                        allocError = true;
                        strncpy(
                            allocErrorMsg, "mmap failed", allocErrorMsgSize);
                        break;
                    }
                }
                PageT &page = *new PageT(*this,pBuffer);
                pages[i] = &page;
            }
        } catch (std::exception &excn) {
            allocError = true;
            allocErrorCode = 0;
            if (dynamic_cast<std::bad_alloc *>(&excn) != NULL) {
                strncpy(allocErrorMsg, "malloc failed", allocErrorMsgSize);
            } else {
                strncpy(allocErrorMsg, excn.what(), allocErrorMsgSize);
            }
        }

        if (!allocError) {
            // successful allocation
            break;
        }

        // Free the allocated pages
        for (uint i = 0; i < pages.size(); i++) {
            if (!pages[i]) {
                break;
            }
            PBuffer pBuffer = pages[i]->pBuffer;
            deleteAndNullify(pages[i]);
            if (pBuffer) {
                // Ignore any error. We are sometimes unable to deallocate
                // pages when trying to recover from initial failure. Likely
                // the second attempt will fail as well. This leads to a
                // failed assertion in the VMAllocator destructor. See the
                // comment there.
                bufferAllocator.deallocate(pBuffer);
            }
        }

        if (attempts != 0) {
            // Reduced page count still failed. Give up.
            close();
            throw SysCallExcn(std::string(allocErrorMsg), allocErrorCode);
        }
    }

    // Go back and add the pages to the free list and register them with
    // victimPolicy (requires no further memory allocation as the free lists
    // and victim policy use IntrusiveList and IntrusiveDList).
    for (uint i = 0; i < pages.size(); i++) {
        PageT *page = pages[i];
        PBuffer pBuffer = page->pBuffer;
        if (pBuffer) {
            unmappedBucket.pageList.push_back(*page);
            victimPolicy.registerPage(*page);
        } else {
            unallocatedBucket.pageList.push_back(*page);
        }
    }

    uint nPages = std::min(nPagesInit, nPagesMax);
    calcDirtyThreshholds(nPages);
    victimPolicy.setAllocatedPageCount(nPages);
}
void CacheImpl< PageT, VictimPolicyT >::successfulPrefetch () [private]

Updates counters indicating a successful pre-fetch has occurred.

Definition at line 677 of file CacheMethodsImpl.h.

{
    incrementStatsCounter(nSuccessfulCachePrefetches);
}
void CacheImpl< PageT, VictimPolicyT >::rejectedPrefetch () [private]

Updates counters indicating a pre-fetch was rejected.

Definition at line 684 of file CacheMethodsImpl.h.

{
    incrementStatsCounter(nRejectedCachePrefetches);
}
void CacheImpl< PageT, VictimPolicyT >::ioRetry () [private]

Updates counters indicating I/O retry was required.

Definition at line 691 of file CacheMethodsImpl.h.

{
    incrementStatsCounter(nIoRetries);
}
void CacheImpl< PageT, VictimPolicyT >::calcDirtyThreshholds (uint nCachePages) [private]

Calculates the number of dirty pages corresponding to the high and low water marks.

Parameters:
    nCachePages    number of cache pages

Definition at line 255 of file CacheMethodsImpl.h.

References CacheImpl< PageT, VictimPolicyT >::dirtyHighWaterMark, CacheImpl< PageT, VictimPolicyT >::dirtyHighWaterPercent, CacheImpl< PageT, VictimPolicyT >::dirtyLowWaterMark, and CacheImpl< PageT, VictimPolicyT >::dirtyLowWaterPercent.

Referenced by CacheImpl< PageT, VictimPolicyT >::allocatePages(), and CacheImpl< PageT, VictimPolicyT >::setAllocatedPageCount().

{
    dirtyHighWaterMark = nCachePages * dirtyHighWaterPercent / 100;
    dirtyLowWaterMark = nCachePages * dirtyLowWaterPercent / 100;
}
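For example, with 1000 cache pages and the default percentages set in the constructor (25 and 5), dirtyHighWaterMark = 250 and dirtyLowWaterMark = 50: lazy writes begin once 250 pages are dirty and continue until fewer than 50 remain dirty (see flushSomePages above).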
void CacheImpl< PageT, VictimPolicyT >::markPageDirty (CachePage &page) [private, virtual]

Implements Cache.

Definition at line 905 of file CacheMethodsImpl.h.

References CachePage::DATA_DIRTY, CachePage::dataStatus, CachePage::isDataValid(), and CachePage::mutex.

{
    StrictMutexGuard pageGuard(page.mutex);
    incrementCounter(nDirtyPages);
    bool bValid = page.isDataValid();
    page.dataStatus = CachePage::DATA_DIRTY;
    victimPolicy.notifyPageDirty(static_cast<PageT &>(page));

    // No synchronization required during notification because caller already
    // holds exclusive lock on page. The notification is called AFTER the page
    // has already been marked dirty in case the listener needs to write to
    // the page (otherwise an infinite loop would occur).
    pageGuard.unlock();
    if (page.pMappedPageListener) {
        page.pMappedPageListener->notifyPageDirty(page,bValid);
    }
}
void CacheImpl< PageT, VictimPolicyT >::notifyTransferCompletion (CachePage &, bool) [private, virtual]

Implements Cache.

Definition at line 858 of file CacheMethodsImpl.h.

References CachePage::DATA_CLEAN, CachePage::DATA_ERROR, CachePage::DATA_READ, CachePage::DATA_WRITE, CachePage::dataStatus, CachePage::getBlockId(), CachePage::ioCompletionCondition, CachePage::mutex, MappedPageListener::notifyAfterPageFlush(), MappedPageListener::notifyAfterPageRead(), opaqueToInt(), and CachePage::pMappedPageListener.

{
    StrictMutexGuard pageGuard(page.mutex);
    // NOTE: A write failure is always a panic, because there's nothing we
    // can do to recover from it. However, read failures may be expected under
    // some recovery conditions, and will be detected as an assertion when the
    // caller invokes readablePage() on the locked page. Callers in recovery
    // can use isDataValid() to avoid the assertion.
    switch (page.dataStatus) {
    case CachePage::DATA_WRITE:
        {
            if (!bSuccess) {
                std::cerr << "Write failed for page 0x" << std::hex <<
                    opaqueToInt(page.getBlockId());
                ::abort();
            }
            decrementCounter(nDirtyPages);
            victimPolicy.notifyPageClean(static_cast<PageT &>(page));
            // let waiting threads know that this page may now be available
            // for victimization
            freePageCondition.notify_all();
        }
        break;
    case CachePage::DATA_READ:
        break;
    default:
        permAssert(false);
        break;
    }
    if (bSuccess) {
        CachePage::DataStatus oldStatus = page.dataStatus;
        page.dataStatus = CachePage::DATA_CLEAN;
        if (page.pMappedPageListener) {
            if (oldStatus == CachePage::DATA_READ) {
                page.pMappedPageListener->notifyAfterPageRead(page);
            } else {
                page.pMappedPageListener->notifyAfterPageFlush(page);
            }
        }
    } else {
        page.dataStatus = CachePage::DATA_ERROR;
    }
    page.ioCompletionCondition.notify_all();
}
uint CacheImpl< PageT, VictimPolicyT >::getTimerIntervalMillis () [private, virtual]

Calculates the interval which should elapse before the next call to onTimerInterval.

This can be different each time. A return value of 0 will cause the TimerThread to cease calling back.

Implements TimerThreadClient.

Definition at line 929 of file CacheMethodsImpl.h.

{
    return idleFlushInterval;
}
void CacheImpl< PageT, VictimPolicyT >::onTimerInterval () [private, virtual]

Receives notification from TimerThread that interval has elapsed.

Implements TimerThreadClient.

Definition at line 936 of file CacheMethodsImpl.h.

{
    flushSomePages();
}
void CacheImpl< PageT, VictimPolicyT >::closeImpl () [protected, virtual]

Must be implemented by derived class to release any resources.

Implements ClosableObject.

Definition at line 946 of file CacheMethodsImpl.h.

References CacheImpl< PageT, VictimPolicyT >::bufferAllocator, CacheAllocator::deallocate(), deleteAndNullify(), CacheImpl< PageT, VictimPolicyT >::deviceTable, CacheImpl< PageT, VictimPolicyT >::getDevice(), Thread::isStarted(), Cache::NULL_DEVICE_ID, PageBucket< PageT >::pageList, CacheImpl< PageT, VictimPolicyT >::pages, CacheImpl< PageT, VictimPolicyT >::pageTable, CacheImpl< PageT, VictimPolicyT >::pDeviceAccessScheduler, DeviceAccessScheduler::stop(), TimerThread::stop(), CacheImpl< PageT, VictimPolicyT >::timerThread, CacheImpl< PageT, VictimPolicyT >::unallocatedBucket, CacheImpl< PageT, VictimPolicyT >::unmappedBucket, CacheImpl< PageT, VictimPolicyT >::unregisterDevice(), and CacheImpl< PageT, VictimPolicyT >::victimPolicy.

{
    if (timerThread.isStarted()) {
        timerThread.stop();
    }

    if (pDeviceAccessScheduler) {
        pDeviceAccessScheduler->stop();
    }

    // unregister the null device
    if (getDevice(NULL_DEVICE_ID)) {
        unregisterDevice(NULL_DEVICE_ID);
    }

    // make sure all devices got unregistered
    for (uint i = 0; i < deviceTable.size(); i++) {
        assert(!deviceTable[i]);
    }

    deleteAndNullify(pDeviceAccessScheduler);

    // clean up page hash table
    for (uint i = 0; i < pageTable.size(); i++) {
        // all pages should already have been unmapped
        assert(!pageTable[i]->pageList.size());
        deleteAndNullify(pageTable[i]);
    }

    unmappedBucket.pageList.clear();
    unallocatedBucket.pageList.clear();

    // deallocate all pages
    for (uint i = 0; i < pages.size(); i++) {
        if (!pages[i]) {
            continue;
        }
        victimPolicy.unregisterPage(*(pages[i]));
        PBuffer pBuffer = pages[i]->pBuffer;
        if (pBuffer) {
            int errorCode;
            if (bufferAllocator.deallocate(pBuffer, &errorCode)) {
                throw SysCallExcn("munmap failed", errorCode);
            }
        }
        deleteAndNullify(pages[i]);
    }
}
void CacheImpl< PageT, VictimPolicyT >::setAllocatedPageCount (uint nMemPages) [virtual]
Resizes this cache.
If nMemPages is greater than the number of page buffers currently allocated, allocates more. If less, frees some (victimizing as necessary).
Parameters:
    nMemPages    desired buffer allocation count; must not exceed the nMemPagesMax specified when the cache was created, and must also not exceed the current clean page target
Implements Cache.
Definition at line 275 of file CacheMethodsImpl.h.
References CacheAllocator::allocate(), CacheImpl< PageT, VictimPolicyT >::bufferAllocator, CacheImpl< PageT, VictimPolicyT >::calcDirtyThreshholds(), CacheAllocator::deallocate(), IntrusiveListMutator< T, DerivedListNode >::detach(), CacheImpl< PageT, VictimPolicyT >::findFreePage(), CacheImpl< PageT, VictimPolicyT >::freePage(), PageBucket< PageT >::mutex, PageBucket< PageT >::pageList, CacheImpl< PageT, VictimPolicyT >::pages, CacheImpl< PageT, VictimPolicyT >::unallocatedBucket, and CacheImpl< PageT, VictimPolicyT >::victimPolicy.
{
    assert(nMemPagesDesired <= pages.size());
    // exclusive lock unallocatedBucket in case someone is crazy enough to call
    // this method from multiple threads
    SXMutexExclusiveGuard unallocatedBucketGuard(unallocatedBucket.mutex);
    uint nMemPages =
        pages.size() - unallocatedBucket.pageList.size();
    if (nMemPages < nMemPagesDesired) {
        // allocate some more

        // LER-5976: Allocate all pBuffers ahead of time so we can revert to
        // the old cache size if there's an allocation error.
        int nMemPagesToAllocate = nMemPagesDesired - nMemPages;
        std::vector<PBuffer> buffers(nMemPagesToAllocate);

        for (int i = 0; i < nMemPagesToAllocate; ++i) {
            int errorCode;
            PBuffer pBuffer = static_cast<PBuffer>(
                bufferAllocator.allocate(&errorCode));

            if (pBuffer == NULL) {
                // Release each allocated buffer and re-throw
                for (int i = 0; i < nMemPagesToAllocate; i++) {
                    if (buffers[i] == NULL) {
                        break;
                    }

                    // Ignore any errors and try to deallocate as many of the
                    // buffers as possible. Ignoring errors leads to a failed
                    // assertion in the VMAllocator destructor on shutdown. See
                    // the comment there.
                    bufferAllocator.deallocate(buffers[i]);
                }
                buffers.clear();
                std::vector<PBuffer>(0).swap(buffers); // dealloc vector

                throw SysCallExcn("mmap failed", errorCode);
            }

            buffers[i] = pBuffer;
        }

        PageBucketMutator mutator(unallocatedBucket.pageList);
        for (int i = 0; i < nMemPagesToAllocate; i++) {
            PBuffer pBuffer = buffers[i];
            PageT *page = mutator.detach();
            assert(!page->pBuffer);
            page->pBuffer = pBuffer;
            victimPolicy.registerPage(*page);
            // move to unmappedBucket
            freePage(*page);
        }
    } else {
        // deallocate some
        for (; nMemPages > nMemPagesDesired; --nMemPages) {
            PageT *page;
            do {
                page = findFreePage();
            } while (!page);

            int errorCode;
            if (bufferAllocator.deallocate(page->pBuffer, &errorCode)) {
                // If the page buffer couldn't be deallocated, put it back
                // before reporting the error
                freePage(*page);
                throw SysCallExcn("munmap failed", errorCode);
            }
            page->pBuffer = NULL;
            victimPolicy.unregisterPage(*page);
            // move to unallocatedBucket
            unallocatedBucket.pageList.push_back(*page);
        }
    }

    calcDirtyThreshholds(nMemPagesDesired);
    // Notify the policy of the new cache size
    victimPolicy.setAllocatedPageCount(nMemPagesDesired);
}
uint CacheImpl< PageT, VictimPolicyT >::getAllocatedPageCount () [virtual]

Gets a count of how many pages currently have allocated buffers.

Implements Cache.

Definition at line 262 of file CacheMethodsImpl.h.

References PageBucket< PageT >::mutex, PageBucket< PageT >::pageList, CacheImpl< PageT, VictimPolicyT >::pages, and CacheImpl< PageT, VictimPolicyT >::unallocatedBucket.

{
    SXMutexSharedGuard guard(unallocatedBucket.mutex);
    return pages.size() - unallocatedBucket.pageList.size();
}
uint CacheImpl< PageT, VictimPolicyT >::getMaxAllocatedPageCount () [virtual]

Implements Cache.

Definition at line 269 of file CacheMethodsImpl.h.

References CacheImpl< PageT, VictimPolicyT >::pages.

{
    return pages.size();
}
PageT * CacheImpl< PageT, VictimPolicyT >::lockPage (BlockId blockId, LockMode lockMode, bool readIfUnmapped, MappedPageListener *pMappedPageListener, TxnId txnId) [virtual]
Locks a page into memory with the specified concurrency mode.
When the page contents are no longer needed, the caller must invoke the unlockPage() method with the same concurrency mode to release it. If the desired page is already locked by another thread with an incompatible concurrency mode, this call blocks until the page becomes available (unless the lock mode is of the NoWait variety, in which case it returns NULL immediately). Note that NoWait locking only applies to lock contention, not I/O: if an unmapped page is locked in NoWait mode, the call still blocks until the read completes.
The device referenced by the requested blockId must already be registered with the cache and must remain registered for the duration of the lock.
The available concurrency modes are defined by LockMode: shared and exclusive, each with a plain (blocking) and NoWait variant.
Parameters:
    blockId                the BlockId of the page to be locked
    lockMode               the desired concurrency mode
    readIfUnmapped         if true (the default) the page data is read as part of mapping; if false, the page data is left invalid until first write (used when allocating a new block with invalid contents)
    pMappedPageListener    optional listener to receive notifications when this page is written; if specified, it must match all prior and subsequent lock requests for the same page mapping
    txnId                  optional TxnId to associate with the lock; the default is IMPLICIT_TXN_ID, which uses the current thread ID as an implicit TxnId
Implements CacheAccessor.
Definition at line 358 of file CacheMethodsImpl.h.
References CachePage::DATA_READ, CachePage::DATA_WRITE, ETERNITY, CompoundId::getDeviceId(), LOCKMODE_S_NOWAIT, LOCKMODE_X, LOCKMODE_X_NOWAIT, and NULL_BLOCK_ID.
{
    // first find the page and increment its reference count

    assert(blockId != NULL_BLOCK_ID);
    assert(CompoundId::getDeviceId(blockId) != NULL_DEVICE_ID);
    PageBucketT &bucket = getHashBucket(blockId);
    PageT *page = lookupPage(bucket,blockId,true);
    if (page) {
        assert(page->pMappedPageListener == pMappedPageListener);
        // note that lookupPage incremented page's reference count for us, so
        // it's safe from victimization from here on
        incrementStatsCounter(nCacheHits);
    } else {
        do {
            page = findFreePage();
        } while (!page);

        // note that findFreePage returns an unmapped page, making it safe from
        // victimization at this point; mapPage will increment the reference
        // count

        PageT &mappedPage = mapPage(
            bucket,*page,blockId,pMappedPageListener,readIfUnmapped);
        if (&mappedPage == page) {
            // mapPage found no existing mapping, so initiate read from disk if
            // necessary
            if (readIfUnmapped) {
                readPageAsync(*page);
            }
        } else {
            // mapPage found an existing mapping, so forget unused free page,
            // and no need to initiate read from disk
            page = &mappedPage;
        }
        if (readIfUnmapped) {
            // whether or not an existing mapping was found, need
            // to wait for any pending read to complete (either our own started
            // above or someone else's)
            StrictMutexGuard pageGuard(page->mutex);
            while (page->dataStatus == CachePage::DATA_READ) {
                page->waitForPendingIO(pageGuard);
            }
        }
    }

    incrementStatsCounter(nCacheRequests);

    // now acquire the requested lock

    if (!page->lock.waitFor(lockMode,ETERNITY,txnId)) {
        // NoWait failed; release reference
        assert((lockMode == LOCKMODE_S_NOWAIT) ||
            (lockMode == LOCKMODE_X_NOWAIT));
        StrictMutexGuard pageGuard(page->mutex);
        page->nReferences--;
        if (!page->nReferences) {
            victimPolicy.notifyPageUnpin(*page);
        }
        return NULL;
    }
    if ((lockMode == LOCKMODE_X) || (lockMode == LOCKMODE_X_NOWAIT)) {
        // if we're locking the page for write, then need to make sure
        // that any pending write completes before this thread starts
        // changing the contents

        // REVIEW: can we use double-checked idiom here?
        StrictMutexGuard pageGuard(page->mutex);
        while (page->dataStatus == CachePage::DATA_WRITE) {
            page->waitForPendingIO(pageGuard);
        }
#ifdef DEBUG
        int errorCode;
        if (bufferAllocator.setProtection(
                page->pBuffer, cbPage, false, &errorCode))
        {
            throw new SysCallExcn("memory protection failed", errorCode);
        }
#endif
    } else {
        // TODO jvs 7-Feb-2006: protection for other cases
#ifdef DEBUG
        StrictMutexGuard pageGuard(page->mutex);
        if (page->nReferences == 1) {
            int errorCode;
            if (bufferAllocator.setProtection(
                    page->pBuffer, cbPage, true, &errorCode))
            {
                throw new SysCallExcn("memory protection failed", errorCode);
            }
        }
#endif
    }
    return page;
}
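For example, the NoWait modes surface contention as a NULL return rather than blocking. A hedged sketch, with pCache and blockId assumed to be set up as in the usage sketch after the class description:

    // try a no-wait shared lock first, fall back to a blocking lock
    CachePage *pPage = pCache->lockPage(
        blockId, LOCKMODE_S_NOWAIT, true, NULL, IMPLICIT_TXN_ID);
    if (!pPage) {
        // another thread holds an incompatible lock; block until available
        pPage = pCache->lockPage(blockId, LOCKMODE_S, true, NULL, IMPLICIT_TXN_ID);
    }
    // ... read via pPage->getReadableData() ...
    // release with the base mode (cf. flushSomePages above, which releases a
    // LOCKMODE_S_NOWAIT acquisition with LOCKMODE_S)
    pCache->unlockPage(*pPage, LOCKMODE_S, IMPLICIT_TXN_ID);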
PageT & CacheImpl< PageT, VictimPolicyT >::lockScratchPage | ( | BlockNum | blockNum | ) | [virtual] |
Allocates a free page buffer for scratch usage.
The returned page is considered to be locked in exclusive mode but not mapped to any device. To release the page, call unlockPage with LOCKMODE_X.
Although the page remains unmapped, its BlockId is set for the duration of the lock. The caller can supply a BlockNum, which need not be unique.
blockNum | the block number to use when making up the Page's BlockId; the device ID will be NULL_DEVICE_ID |
Implements Cache.
Definition at line 546 of file CacheMethodsImpl.h.
References CachePage::DATA_DIRTY, CompoundId::setBlockNum(), and CompoundId::setDeviceId().
{
    PageT *page;
    do {
        page = findFreePage();
    } while (!page);

    StrictMutexGuard pageGuard(page->mutex);
    page->nReferences = 1;
    // Set dirty early to avoid work on first call to getWritableData.
    // No need to notify the victimPolicy that the page is dirty because
    // scratch pages are locked for the duration of their use so they're
    // never candidates for victimization or flushing.
    page->dataStatus = CachePage::DATA_DIRTY;
    CompoundId::setDeviceId(page->blockId,NULL_DEVICE_ID);
    CompoundId::setBlockNum(page->blockId,blockNum);

    return *page;
}
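A hedged sketch of the scratch-page life cycle follows; pCache is an assumed SharedCache, and getWritableData is the CachePage accessor mentioned in the comments above:

// Hedged sketch: allocate a scratch buffer, fill it, then release it.
// The block number merely labels the page's BlockId; it need not be unique.
// (memset comes from <cstring>.)
CachePage &scratchPage = pCache->lockScratchPage(0);
memset(scratchPage.getWritableData(), 0, pCache->getPageSize());
// scratch pages are implicitly held in exclusive mode
pCache->unlockPage(scratchPage, LOCKMODE_X, IMPLICIT_TXN_ID);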
void CacheImpl< PageT, VictimPolicyT >::discardPage | ( | BlockId | blockId | ) | [virtual] |
Unmaps a page from the cache if already mapped, discarding its contents if dirty.
The caller must ensure that no other thread has the page locked.
blockId | the BlockId of the page to be discarded |
Implements CacheAccessor.
Definition at line 520 of file CacheMethodsImpl.h.
References CachePage::DATA_WRITE, and NULL_BLOCK_ID.
{
    assert(blockId != NULL_BLOCK_ID);
    PageBucketT &bucket = getHashBucket(blockId);
    PageT *page = lookupPage(bucket,blockId,false);
    if (!page) {
        // page is not mapped, so nothing to discard, but still need to
        // notify the policy
        victimPolicy.notifyPageDiscard(blockId);
        return;
    }
    StrictMutexGuard pageGuard(page->mutex);
    // lookupPage already waited for pending reads, but also need to wait for
    // pending writes
    // REVIEW: isn't this redundant with code in unmapAndFreeDiscardedPage?
    while (page->dataStatus == CachePage::DATA_WRITE) {
        page->waitForPendingIO(pageGuard);
    }
    // our own lookupPage adds 1 reference; it should be the only one left
    assert(page->nReferences == 1);
    page->nReferences = 0;
    unmapAndFreeDiscardedPage(*page,pageGuard);
}
uint CacheImpl< PageT, VictimPolicyT >::checkpointPages | ( | PagePredicate & | pagePredicate, | |
CheckpointType | checkpointType | |||
) | [virtual] |
Flushes and/or unmaps selected pages.
pagePredicate | caller-provided interface for deciding which pages should be checkpointed; the given PagePredicate will be called for each mapped page, and only those which satisfy the predicate will be affected by the checkpoint (note that the page mutex is held for the duration of the call, so implementations must take care to avoid deadlock) | |
checkpointType | type of checkpoint to execute |
Implements Cache.
Definition at line 698 of file CacheMethodsImpl.h.
References CHECKPOINT_FLUSH_AND_UNMAP, and CachePage::DATA_WRITE.
{
    // TODO: change RandomAccessRequest interface so that we can gang
    // these all up into one big discontiguous request

    uint nPages = 0;
    bool countPages = true;

    FlushPhase flushPhase;
    if (checkpointType >= CHECKPOINT_FLUSH_AND_UNMAP) {
        flushPhase = phaseInitiate;
    } else {
        flushPhase = phaseSkip;
    }
    for (;;) {
        for (uint i = 0; i < pages.size(); i++) {
            PageT &page = *(pages[i]);
            StrictMutexGuard pageGuard(page.mutex);
            // restrict view to just mapped pages of interest
            if (!page.hasBlockId()) {
                continue;
            }
            if (!pagePredicate(page)) {
                continue;
            }
            if (countPages) {
                ++nPages;
            }
            if (flushPhase == phaseInitiate) {
                if (page.isDirty()) {
                    // shouldn't be flushing a page if someone is currently
                    // scribbling on it
                    assert(!page.isExclusiveLockHeld());
                    incrementStatsCounter(nCheckpointWrites);
                    // initiate a flush
                    writePageAsync(page);
                }
            } else if (flushPhase == phaseWait) {
                BlockId origBlockId = page.getBlockId();
                MappedPageListener *origListener = page.pMappedPageListener;
                while (page.dataStatus == CachePage::DATA_WRITE) {
                    page.waitForPendingIO(pageGuard);
                }

                // If this page has been remapped during sleeps that occurred
                // while waiting for the page I/O to complete, then there's
                // no need to reset the listener, since the remap has
                // effectively reset the listener. (TODO: zfong 6/23/08 -
                // Add a unit testcase for this.)
                //
                // Otherwise, reset the listener, if called for by the original
                // listener. Note that by doing so, during the next iteration
                // in the outermost for loop in this method when we're
                // unmapping cache entries, we will not unmap this page
                // because we've changed the listener.
                if (page.pMappedPageListener &&
                    page.pMappedPageListener == origListener &&
                    page.getBlockId() == origBlockId)
                {
                    MappedPageListener *newListener =
                        page.pMappedPageListener->notifyAfterPageCheckpointFlush(
                            page);
                    if (newListener != NULL) {
                        page.pMappedPageListener = newListener;
                    }
                }
            } else {
                if (checkpointType <= CHECKPOINT_FLUSH_AND_UNMAP) {
                    unmapAndFreeDiscardedPage(page,pageGuard);
                }
            }
        }
        countPages = false;
        if (flushPhase == phaseInitiate) {
            flushPhase = phaseWait;
            continue;
        }
        if (flushPhase == phaseWait) {
            if (checkpointType <= CHECKPOINT_FLUSH_AND_UNMAP) {
                flushPhase = phaseSkip;
                continue;
            }
        }
        return nPages;
    }
}
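To illustrate the predicate contract, here is a hedged sketch of a caller-defined predicate that selects pages belonging to one listener. It assumes PagePredicate declares a virtual bool operator()(CachePage const &) and that the listener is reachable via a getMappedPageListener accessor; both are assumptions, not confirmed by this page:

// Hedged sketch of a PagePredicate implementation (interface details assumed).
class ListenerPagePredicate : public PagePredicate
{
    MappedPageListener *pListener;
public:
    explicit ListenerPagePredicate(MappedPageListener *pListenerInit)
        : pListener(pListenerInit)
    {
    }
    virtual bool operator()(CachePage const &page)
    {
        // the page mutex is held during this call, so avoid anything
        // that could block
        return page.getMappedPageListener() == pListener;
    }
};

ListenerPagePredicate pred(pListener);
uint nAffected = pCache->checkpointPages(pred, CHECKPOINT_FLUSH_AND_UNMAP);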
void CacheImpl< PageT, VictimPolicyT >::collectStats | ( | CacheStats & | stats | ) | [virtual] |
Gets a snapshot of cache activity; as a side-effect, clears cumulative performance counters.
stats | receives the snapshot |
Implements Cache.
Definition at line 1087 of file CacheMethodsImpl.h.
References CacheStats::nCheckpointWrites, CacheStats::nCheckpointWritesSinceInit, CacheStats::nDirtyPages, CacheStats::nHits, CacheStats::nHitsSinceInit, CacheStats::nIoRetries, CacheStats::nIoRetriesSinceInit, CacheStats::nLazyWriteCalls, CacheStats::nLazyWriteCallsSinceInit, CacheStats::nLazyWrites, CacheStats::nLazyWritesSinceInit, CacheStats::nMemPagesAllocated, CacheStats::nMemPagesMax, CacheStats::nMemPagesUnused, CacheStats::nPageReads, CacheStats::nPageReadsSinceInit, CacheStats::nPageWrites, CacheStats::nPageWritesSinceInit, CacheStats::nRejectedPrefetches, CacheStats::nRejectedPrefetchesSinceInit, CacheStats::nRequests, CacheStats::nRequestsSinceInit, CacheStats::nSuccessfulPrefetches, CacheStats::nSuccessfulPrefetchesSinceInit, CacheStats::nVictimizations, CacheStats::nVictimizationsSinceInit, CacheStats::nVictimizationWrites, and CacheStats::nVictimizationWritesSinceInit.
{
    stats.nHits = nCacheHits;
    stats.nRequests = nCacheRequests;
    stats.nVictimizations = nVictimizations;
    stats.nDirtyPages = nDirtyPages;
    stats.nPageReads = nPageReads;
    stats.nPageWrites = nPageWrites;
    stats.nRejectedPrefetches = nRejectedCachePrefetches;
    stats.nIoRetries = nIoRetries;
    stats.nSuccessfulPrefetches = nSuccessfulCachePrefetches;
    stats.nLazyWrites = nLazyWrites;
    stats.nLazyWriteCalls = nLazyWriteCalls;
    stats.nVictimizationWrites = nVictimizationWrites;
    stats.nCheckpointWrites = nCheckpointWrites;
    stats.nMemPagesAllocated = getAllocatedPageCount();
    stats.nMemPagesUnused = unmappedBucket.pageList.size();
    stats.nMemPagesMax = getMaxAllocatedPageCount();

    // NOTE: nDirtyPages is not cumulative; don't clear it!
    nCacheHits.clear();
    nCacheRequests.clear();
    nVictimizations.clear();
    nPageReads.clear();
    nPageWrites.clear();
    nRejectedCachePrefetches.clear();
    nIoRetries.clear();
    nSuccessfulCachePrefetches.clear();
    nLazyWrites.clear();
    nLazyWriteCalls.clear();
    nVictimizationWrites.clear();
    nCheckpointWrites.clear();

    statsSinceInit.nHitsSinceInit += stats.nHits;
    statsSinceInit.nRequestsSinceInit += stats.nRequests;
    statsSinceInit.nVictimizationsSinceInit += stats.nVictimizations;
    statsSinceInit.nPageReadsSinceInit += stats.nPageReads;
    statsSinceInit.nPageWritesSinceInit += stats.nPageWrites;
    statsSinceInit.nRejectedPrefetchesSinceInit += stats.nRejectedPrefetches;
    statsSinceInit.nIoRetriesSinceInit += stats.nIoRetries;
    statsSinceInit.nSuccessfulPrefetchesSinceInit +=
        stats.nSuccessfulPrefetches;
    statsSinceInit.nLazyWritesSinceInit += stats.nLazyWrites;
    statsSinceInit.nLazyWriteCallsSinceInit += stats.nLazyWriteCalls;
    statsSinceInit.nVictimizationWritesSinceInit += stats.nVictimizationWrites;
    statsSinceInit.nCheckpointWritesSinceInit += stats.nCheckpointWrites;

    stats.nHitsSinceInit = statsSinceInit.nHitsSinceInit;
    stats.nRequestsSinceInit = statsSinceInit.nRequestsSinceInit;
    stats.nVictimizationsSinceInit = statsSinceInit.nVictimizationsSinceInit;
    stats.nPageReadsSinceInit = statsSinceInit.nPageReadsSinceInit;
    stats.nPageWritesSinceInit = statsSinceInit.nPageWritesSinceInit;
    stats.nRejectedPrefetchesSinceInit =
        statsSinceInit.nRejectedPrefetchesSinceInit;
    stats.nIoRetriesSinceInit =
        statsSinceInit.nIoRetriesSinceInit;
    stats.nSuccessfulPrefetchesSinceInit =
        statsSinceInit.nSuccessfulPrefetchesSinceInit;
    stats.nLazyWritesSinceInit = statsSinceInit.nLazyWritesSinceInit;
    stats.nLazyWriteCallsSinceInit = statsSinceInit.nLazyWriteCallsSinceInit;
    stats.nVictimizationWritesSinceInit =
        statsSinceInit.nVictimizationWritesSinceInit;
    stats.nCheckpointWritesSinceInit =
        statsSinceInit.nCheckpointWritesSinceInit;
}
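As a hedged monitoring sketch (pCache assumed): each call clears the interval counters, so periodic polling yields per-interval deltas alongside the running totals:

// Hedged sketch: sample interval and cumulative counters.
// (std::cout comes from <iostream>.)
CacheStats stats;
pCache->collectStats(stats);
double hitRatio =
    stats.nRequests ? double(stats.nHits) / stats.nRequests : 0.0;
std::cout << "interval hit ratio = " << hitRatio
          << ", requests since init = " << stats.nRequestsSinceInit
          << std::endl;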
void CacheImpl< PageT, VictimPolicyT >::registerDevice | ( | DeviceId | deviceId, | |
SharedRandomAccessDevice | pDevice | |||
) | [virtual] |
Registers the given device with the Cache; must be called exactly once before any other caching operations can be requested for pages of this device.
deviceId | the ID of the device to be registered | |
pDevice | the device to be registered |
Implements Cache.
Definition at line 825 of file CacheMethodsImpl.h.
References opaqueToInt().
{
    assert(deviceTable[opaqueToInt(deviceId)] == NULL);
    deviceTable[opaqueToInt(deviceId)] = pDevice;
    pDeviceAccessScheduler->registerDevice(pDevice);
}
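A hedged setup sketch; the device object itself is assumed to have been created elsewhere (e.g. a file-backed RandomAccessDevice held in pDevice), and the DeviceId constructor from an integer is assumed:

// Hedged sketch: register a device before any page of it can be cached.
DeviceId deviceId(1);   // assumed-free slot in the device table
pCache->registerDevice(deviceId, pDevice);
// ... lock, flush, and unlock pages of this device ...
// all of the device's pages must be unmapped before unregistering:
pCache->unregisterDevice(deviceId);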
void CacheImpl< PageT, VictimPolicyT >::unregisterDevice | ( | DeviceId | deviceId | ) | [virtual] |
Unregisters the given device from the Cache, asserting that no pages remain mapped to the specified device.
deviceId | the ID of the device to be unregistered |
Implements Cache.
Definition at line 834 of file CacheMethodsImpl.h.
References CHECKPOINT_DISCARD.
Referenced by CacheImpl< PageT, VictimPolicyT >::closeImpl().
{
    SharedRandomAccessDevice &pDevice = getDevice(deviceId);
    assert(pDevice);
    DeviceIdPagePredicate pagePredicate(deviceId);
    uint nPages = checkpointPages(pagePredicate,CHECKPOINT_DISCARD);
    assert(!nPages);
    pDeviceAccessScheduler->unregisterDevice(pDevice);
    pDevice.reset();
}
SharedRandomAccessDevice & CacheImpl< PageT, VictimPolicyT >::getDevice | ( | DeviceId | deviceId | ) | [virtual] |
Dereferences a device ID to the registered object which represents it.
deviceId | the ID of the device of interest |
Implements Cache.
Definition at line 847 of file CacheMethodsImpl.h.
References opaqueToInt().
Referenced by CacheImpl< PageT, VictimPolicyT >::closeImpl().
{
    return deviceTable[opaqueToInt(deviceId)];
}
bool CacheImpl< PageT, VictimPolicyT >::prefetchPage | ( | BlockId | blockId, | |
MappedPageListener * | pMappedPageListener | |||
) | [virtual] |
Hints that a page should be prefetched in preparation for a future lock request.
blockId | the BlockId of the page to be prefetched | |
pMappedPageListener | optional listener to receive notifications when this page is written; if specified, it must match all prior and subsequent lock requests for the same page mapping |
Implements CacheAccessor.
Definition at line 568 of file CacheMethodsImpl.h.
References NULL_BLOCK_ID.
{
    assert(blockId != NULL_BLOCK_ID);
    if (isPageMapped(blockId)) {
        // already mapped: either it's already fully read or someone
        // else has initiated a read; either way, nothing for us to do
        successfulPrefetch();
        return true;
    }
    PageT *page = findFreePage();
    if (!page) {
        // cache is low on free pages: ignore prefetch hint
        rejectedPrefetch();
        return false;
    }

    PageBucketT &bucket = getHashBucket(blockId);
    bool bPendingRead = true;
    // don't need to increment the page reference count since the pending
    // read will protect the page from victimization, and the calling thread
    // doesn't actually want a reference until it locks the page later
    bool bIncRef = false;
    PageT &mappedPage = mapPage(
        bucket,*page,blockId,pMappedPageListener,bPendingRead,bIncRef);
    if (&mappedPage == page) {
        if (readPageAsync(*page)) {
            successfulPrefetch();
        } else {
            rejectedPrefetch();
            return false;
        }
    } else {
        // forget unused free page, and don't bother with read since someone
        // else must already have kicked it off
        page = &mappedPage;
    }
    return true;
}
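A hedged sketch of the intended pattern: hint the read early, overlap other work with the I/O, and let the eventual lockPage wait only if the read is still pending. pCache and blockId are assumed:

// Hedged sketch: the hint is best-effort; if it was rejected, the later
// lockPage simply performs the read synchronously itself.
pCache->prefetchPage(blockId, NULL);
// ... overlap unrelated work with the asynchronous read ...
CachePage *page = pCache->lockPage(
    blockId, LOCKMODE_S, true, NULL, IMPLICIT_TXN_ID);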
void CacheImpl< PageT, VictimPolicyT >::prefetchBatch | ( | BlockId | blockId, | |
uint | nPages, | |||
MappedPageListener * | pMappedPageListener | |||
) | [virtual] |
Hints that a contiguous run of pages should be prefetched.
blockId | the BlockId of the first page to be prefetched; the remaining pages in the batch follow at sequentially increasing BlockIds |
nPages | number of pages in batch | |
pMappedPageListener | optional listener to receive notifications when this page is written; if specified, it must match all prior and subsequent lock requests for the same page mapping |
Implements CacheAccessor.
Definition at line 609 of file CacheMethodsImpl.h.
References RandomAccessRequest::bindingList, RandomAccessRequest::cbOffset, RandomAccessRequest::cbTransfer, CompoundId::getDeviceId(), CompoundId::incBlockNum(), NULL_BLOCK_ID, RandomAccessRequest::pDevice, RandomAccessRequest::READ, DeviceAccessScheduler::schedule(), and RandomAccessRequest::type.
{
    assert(blockId != NULL_BLOCK_ID);
    assert(nPagesPerBatch > 1);

    SharedRandomAccessDevice &pDevice = getDevice(
        CompoundId::getDeviceId(blockId));
    DeviceAccessScheduler &scheduler = getDeviceAccessScheduler(*pDevice);
    RandomAccessRequest request;
    request.pDevice = pDevice.get();
    request.cbOffset = getPageOffset(blockId);
    request.cbTransfer = 0;
    request.type = RandomAccessRequest::READ;

    BlockId blockIdi = blockId;
    for (uint i = 0; i < nPagesPerBatch; i++) {
        PageT *page;
        do {
            page = findFreePage();
        } while (!page);

        PageBucketT &bucket = getHashBucket(blockIdi);
        bool bPendingRead = true;
        bool bIncRef = false;
        PageT &mappedPage = mapPage(
            bucket,*page,blockIdi,pMappedPageListener,bPendingRead,bIncRef);
        if (&mappedPage != page) {
            // This page already mapped; can't do batch prefetch. For the
            // pages which we've already mapped, initiate transfer.
            // For this page, skip altogether since it's already been read
            // (or has a read in progress). For remaining pages, continue
            // building new request.
            if (request.cbTransfer) {
                if (scheduler.schedule(request)) {
                    successfulPrefetch();
                } else {
                    ioRetry();
                    rejectedPrefetch();
                }
            }
            // adjust start past transfer just initiated plus already mapped
            // page
            request.cbOffset += request.cbTransfer;
            request.cbOffset += getPageSize();
            request.cbTransfer = 0;
            request.bindingList.clear(false);
        } else {
            // add this page to the request
            request.cbTransfer += getPageSize();
            request.bindingList.push_back(*page);
        }
        CompoundId::incBlockNum(blockIdi);
    }
    // deal with leftovers
    if (request.cbTransfer) {
        if (scheduler.schedule(request)) {
            successfulPrefetch();
        } else {
            ioRetry();
            rejectedPrefetch();
        }
    }
}
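A hedged sketch of batch prefetch over a contiguous extent, stepping through block IDs with CompoundId::incBlockNum (referenced above); pCache and firstBlockId are assumed:

// Hedged sketch: prefetch an 8-page extent, then consume it page by page.
uint const nBatch = 8;
pCache->prefetchBatch(firstBlockId, nBatch, NULL);
BlockId blockId = firstBlockId;
for (uint i = 0; i < nBatch; ++i) {
    CachePage *page = pCache->lockPage(
        blockId, LOCKMODE_S, true, NULL, IMPLICIT_TXN_ID);
    // ... process the page ...
    pCache->unlockPage(*page, LOCKMODE_S, IMPLICIT_TXN_ID);
    CompoundId::incBlockNum(blockId);
}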
void CacheImpl< PageT, VictimPolicyT >::flushPage | ( | CachePage & | page, | |
bool | async | |||
) | [virtual] |
Forces the contents of a dirty page to its mapped location.
Page must already be locked in exclusive mode. For an asynchronous flush, the caller must ensure that the page contents remain unchanged until the flush completes.
page | the page to be flushed | |
async | true to schedule an asynchronous write and return immediately; false to wait for the write to complete |
Implements CacheAccessor.
Definition at line 788 of file CacheMethodsImpl.h.
References MappedPageListener::canFlushPage(), CachePage::DATA_WRITE, CachePage::dataStatus, CachePage::isExclusiveLockHeld(), CachePage::mutex, CachePage::pMappedPageListener, and CachePage::waitForPendingIO().
{
    StrictMutexGuard pageGuard(page.mutex);
    assert(page.isExclusiveLockHeld());
    if (page.pMappedPageListener) {
        if (!page.pMappedPageListener->canFlushPage(page)) {
            if (async) {
                // TODO jvs 21-Jan-2006: instead of ignoring request, fail; we
                // should be using Segment-level logic to avoid ever getting
                // here
                return;
            }
            permFail("attempt to flush page out of order");
        }
    }
    if (page.dataStatus != CachePage::DATA_WRITE) {
        // no flush already in progress, so request one
        writePageAsync(static_cast<PageT &>(page));
    }
    if (async) {
        return;
    }
    // wait for flush to complete
    while (page.dataStatus == CachePage::DATA_WRITE) {
        page.waitForPendingIO(pageGuard);
    }
}
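A hedged sketch of a synchronous flush under an exclusive lock (pCache and blockId assumed):

// Hedged sketch: mutate a page, then force it to disk before unlocking.
CachePage *page = pCache->lockPage(
    blockId, LOCKMODE_X, true, NULL, IMPLICIT_TXN_ID);
// ... modify the contents via getWritableData ...
pCache->flushPage(*page, false);   // false = wait for the write to finish
pCache->unlockPage(*page, LOCKMODE_X, IMPLICIT_TXN_ID);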
void CacheImpl< PageT, VictimPolicyT >::unlockPage | ( | CachePage & | page, | |
LockMode | lockMode, | |||
TxnId | txnId | |||
) | [virtual] |
Releases lock held on page.
page | the page to be unlocked | |
lockMode | must correspond to value passed to Cache::lockPage; however, for pages locked with NOWAIT, the equivalent unlock type should be normal (e.g. LOCKMODE_S instead of LOCKMODE_S_NOWAIT) | |
txnId | must correspond to value passed to Cache::lockPage |
Implements CacheAccessor.
Definition at line 458 of file CacheMethodsImpl.h.
References CachePage::DATA_INVALID, CompoundId::getDeviceId(), LOCKMODE_S_NOWAIT, and NULL_BLOCK_ID.
{
    assert(lockMode < LOCKMODE_S_NOWAIT);
    PageT &page = static_cast<PageT &>(vPage);
    StrictMutexGuard pageGuard(page.mutex);
    assert(page.nReferences);
    bool bFree = false;
    assert(page.hasBlockId());
    if (CompoundId::getDeviceId(page.getBlockId()) == NULL_DEVICE_ID) {
        // originated from lockScratchPage()
        bFree = true;
    } else {
        int errorCode;
        if (bufferAllocator.setProtection(
                page.pBuffer, cbPage, false, &errorCode))
        {
            throw new SysCallExcn("memory protection failed", errorCode);
        }

        page.lock.release(lockMode,txnId);
    }
    page.nReferences--;
    if (!page.nReferences) {
        if (bFree) {
            // The page lock was acquired via lockScratch, so return it to
            // the free list. No need to notify the victimPolicy since
            // the policy wasn't notified when the page was locked.
            page.dataStatus = CachePage::DATA_INVALID;
            page.blockId = NULL_BLOCK_ID;
            freePage(page);
        } else {
            // notify the victim policy that the page is no longer
            // being referenced
            victimPolicy.notifyPageUnpin(page);
        }

        // let waiting threads know that a page has become available
        // (either on the free list or as a victimization candidate)
        freePageCondition.notify_all();
    }
}
void CacheImpl< PageT, VictimPolicyT >::nicePage | ( | CachePage & | page | ) | [virtual] |
Marks a page as nice, indicating that it is very unlikely the page's mapping will be needed again any time soon, so it is a good candidate for victimization.
page | the page to be marked |
Implements CacheAccessor.
Definition at line 818 of file CacheMethodsImpl.h.
{
    victimPolicy.notifyPageNice(static_cast<PageT &>(page));
}
bool CacheImpl< PageT, VictimPolicyT >::isPageMapped | ( | BlockId | blockId | ) | [virtual] |
Determines if a particular page is mapped.
blockId | BlockId of the page to test |
Implements Cache.
Definition at line 503 of file CacheMethodsImpl.h.
References PageBucket< PageT >::mutex, PageBucket< PageT >::pageList, and SXMutexGuard< lockMode >::unlock().
{
    PageBucketT &bucket = getHashBucket(blockId);
    SXMutexSharedGuard bucketGuard(bucket.mutex);
    for (PageBucketIter iter(bucket.pageList); iter; ++iter) {
        StrictMutexGuard pageGuard(iter->mutex);
        if (iter->getBlockId() == blockId) {
            bucketGuard.unlock();
            victimPolicy.notifyPageAccess(*iter, false);
            return true;
        }
    }
    return false;
}
CacheAllocator & CacheImpl< PageT, VictimPolicyT >::getAllocator | ( | ) | const [virtual] |
Implements Cache.
Definition at line 1340 of file CacheMethodsImpl.h.
Referenced by CachePage::tryUpgrade(), and CachePage::upgrade().
{
    return bufferAllocator;
}
void CacheImpl< PageT, VictimPolicyT >::getPrefetchParams | ( | uint & | prefetchPagesMax, | |
uint & | prefetchThrottleRate | |||
) | [virtual] |
Retrieves the current pre-fetch caching parameters that determine how many pages should be pre-fetched and how often the pre-fetches should occur.
[out] | prefetchPagesMax | max number of outstanding pre-fetch pages |
[out] | prefetchThrottleRate | the number of successful pre-fetches that have to occur before the pre-fetch rate is throttled back up, in the event that it has been throttled down due to rejected requests |
Implements CacheAccessor.
Definition at line 105 of file CacheMethodsImpl.h.
{
    prefetchPagesMax = this->prefetchPagesMax;
    prefetchThrottleRate = this->prefetchThrottleRate;
}
virtual DeviceAccessScheduler& CacheImpl< PageT, VictimPolicyT >::getDeviceAccessScheduler | ( | RandomAccessDevice & | ) | [inline, virtual] |
Gets the correct access scheduler for a given device.
Currently the same scheduler is used for all devices.
Implements Cache.
Definition at line 614 of file CacheImpl.h.
References CacheImpl< PageT, VictimPolicyT >::pDeviceAccessScheduler.
{
    return *pDeviceAccessScheduler;
}
SharedCache Cache::newCache | ( | CacheParams const & | cacheParams, | |
CacheAllocator * | bufferAllocator = NULL | |||
) | [static, inherited] |
Factory method.
This creates a cache which uses TwoQVictimPolicy. To create a cache with custom policies, include CacheImpl.h and instantiate CacheImpl directly.
cacheParams | parameters to use to instantiate this cache | |
bufferAllocator | allocator to use for obtaining buffer memory; NULL indicates use a private VMAllocator without mlocking |
Definition at line 51 of file Cache.cpp.
Referenced by DatabaseTest::DatabaseTest(), CacheTestBase::newCache(), SparseBitmapTest::openStorage(), BackupRestoreTest::testBackupCleanup(), BTreeTxnTest::testCaseSetUp(), BackupRestoreTest::testHeaderBackupRestore(), CacheTest::testLargeCacheInit(), CacheTest::testLargeCacheRequest(), and CmdInterpreter::visit().
{
    typedef CacheImpl<
        TwoQPage,
        TwoQVictimPolicy<TwoQPage>
    > TwoQCache;
    return SharedCache(
        new TwoQCache(cacheParams,bufferAllocator),
        ClosableObjectDestructor());
}
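A hedged bootstrap sketch; default-constructed CacheParams are assumed to be usable as-is, though real callers typically populate them from configuration first:

// Hedged sketch: create a TwoQ cache with a private VMAllocator.
CacheParams params;   // defaults assumed adequate for illustration
SharedCache pCache = Cache::newCache(params);
// ... register devices and use the cache ...
// the ClosableObjectDestructor closes the cache when the last
// SharedCache reference is released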
uint Cache::getPageSize | ( | ) | const [inline, inherited] |
Definition at line 137 of file Cache.h.
Referenced by DoubleBufferExecStream::execute(), CachePage::getBufferSize(), FlatFileExecStreamImpl::open(), ScratchBufferExecStream::open(), DoubleBufferExecStream::open(), CachePage::tryUpgrade(), and CachePage::upgrade().
{
    return cbPage;
}
SharedCache Cache::getCache | ( | ) | [virtual, inherited] |
Implements CacheAccessor.
Definition at line 64 of file Cache.cpp.
Referenced by CachePage::getWritableData(), CachePage::tryUpgrade(), and CachePage::upgrade().
uint Cache::getMaxLockedPages | ( | ) | [virtual, inherited] |
Implements CacheAccessor.
Definition at line 69 of file Cache.cpp.
References Cache::getAllocatedPageCount().
{
    return getAllocatedPageCount();
}
void Cache::setMaxLockedPages | ( | uint | nPages | ) | [virtual, inherited] |
Sets the page lock quota on this accessor.
Ignored for accessor implementations that don't support quotas.
nPages | new quota |
Implements CacheAccessor.
Definition at line 74 of file Cache.cpp.
void Cache::setTxnId | ( | TxnId | txnId | ) | [virtual, inherited] |
Sets a default TxnId to use for locking pages (to be used when IMPLICIT_TXN_ID is specified).
Not all CacheAccessor implementations support this behavior.
txnId | new default txn ID |
Implements CacheAccessor.
Definition at line 78 of file Cache.cpp.
TxnId Cache::getTxnId | ( | ) | const [virtual, inherited] |
Implements CacheAccessor.
Definition at line 82 of file Cache.cpp.
References IMPLICIT_TXN_ID.
{
    return IMPLICIT_TXN_ID;
}
void Cache::writeStats | ( | StatsTarget & | target | ) | [virtual, inherited] |
Writes a current stats snapshot to a StatsTarget.
target | receives the stats |
Implements StatsSource.
Definition at line 87 of file Cache.cpp.
References Cache::collectStats(), CacheStats::nCheckpointWrites, CacheStats::nCheckpointWritesSinceInit, CacheStats::nDirtyPages, CacheStats::nHits, CacheStats::nHitsSinceInit, CacheStats::nIoRetries, CacheStats::nIoRetriesSinceInit, CacheStats::nLazyWriteCalls, CacheStats::nLazyWriteCallsSinceInit, CacheStats::nLazyWrites, CacheStats::nLazyWritesSinceInit, CacheStats::nMemPagesAllocated, CacheStats::nMemPagesMax, CacheStats::nMemPagesUnused, CacheStats::nPageReads, CacheStats::nPageReadsSinceInit, CacheStats::nPageWrites, CacheStats::nPageWritesSinceInit, CacheStats::nRejectedPrefetches, CacheStats::nRejectedPrefetchesSinceInit, CacheStats::nRequests, CacheStats::nRequestsSinceInit, CacheStats::nSuccessfulPrefetches, CacheStats::nSuccessfulPrefetchesSinceInit, CacheStats::nVictimizations, CacheStats::nVictimizationsSinceInit, CacheStats::nVictimizationWrites, CacheStats::nVictimizationWritesSinceInit, and StatsTarget::writeCounter().
{
    CacheStats stats;
    collectStats(stats);
    target.writeCounter(
        "CacheHits", stats.nHits);
    target.writeCounter(
        "CacheHitsSinceInit", stats.nHitsSinceInit);
    target.writeCounter(
        "CacheRequests", stats.nRequests);
    target.writeCounter(
        "CacheRequestsSinceInit", stats.nRequestsSinceInit);
    target.writeCounter(
        "CacheVictimizations", stats.nVictimizations);
    target.writeCounter(
        "CacheVictimizationsSinceInit", stats.nVictimizationsSinceInit);
    target.writeCounter(
        "CacheDirtyPages", stats.nDirtyPages);
    target.writeCounter(
        "CachePagesRead", stats.nPageReads);
    target.writeCounter(
        "CachePagesReadSinceInit", stats.nPageReadsSinceInit);
    target.writeCounter(
        "CachePagesWritten", stats.nPageWrites);
    target.writeCounter(
        "CachePagesWrittenSinceInit", stats.nPageWritesSinceInit);
    target.writeCounter(
        "CachePagePrefetchesRejected", stats.nRejectedPrefetches);
    target.writeCounter(
        "CachePagePrefetchesRejectedSinceInit",
        stats.nRejectedPrefetchesSinceInit);
    target.writeCounter(
        "CachePageIoRetries", stats.nIoRetries);
    target.writeCounter(
        "CachePageIoRetriesSinceInit",
        stats.nIoRetriesSinceInit);
    target.writeCounter(
        "CachePagesPrefetched", stats.nSuccessfulPrefetches);
    target.writeCounter(
        "CachePagesPrefetchedSinceInit",
        stats.nSuccessfulPrefetchesSinceInit);
    target.writeCounter("CacheLazyWrites", stats.nLazyWrites);
    target.writeCounter("CacheLazyWritesSinceInit", stats.nLazyWritesSinceInit);
    target.writeCounter("CacheLazyWriteCalls", stats.nLazyWriteCalls);
    target.writeCounter(
        "CacheLazyWriteCallsSinceInit",
        stats.nLazyWriteCallsSinceInit);
    target.writeCounter("CacheVictimizationWrites", stats.nVictimizationWrites);
    target.writeCounter(
        "CacheVictimizationWritesSinceInit",
        stats.nVictimizationWritesSinceInit);
    target.writeCounter("CacheCheckpointWrites", stats.nCheckpointWrites);
    target.writeCounter(
        "CacheCheckpointWritesSinceInit",
        stats.nCheckpointWritesSinceInit);
    target.writeCounter(
        "CachePagesAllocated", stats.nMemPagesAllocated);
    target.writeCounter(
        "CachePagesUnused", stats.nMemPagesUnused);
    target.writeCounter(
        "CachePagesAllocationLimit", stats.nMemPagesMax);
}
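To show how these counters are consumed, here is a hedged sketch of a StatsTarget that prints each counter as it is written. It assumes writeCounter(std::string, int64_t) is the only member that must be overridden; the real interface may declare additional hooks (e.g. snapshot begin/end) that would also need overriding:

// Hedged sketch of a StatsTarget implementation (interface details assumed).
// (std::cout comes from <iostream>.)
class PrintingStatsTarget : public StatsTarget
{
public:
    virtual void writeCounter(std::string name, int64_t value)
    {
        std::cout << name << " = " << value << std::endl;
    }
};

PrintingStatsTarget target;
pCache->writeStats(target);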
bool ClosableObject::isClosed | ( | ) | const [inline, inherited] |
Definition at line 58 of file ClosableObject.h.
{
    return !needsClose;
}
void ClosableObject::close | ( | ) | [inherited] |
Closes this object, releasing any unallocated resources.
Reimplemented in CollectExecStream, CorrelationJoinExecStream, LcsClusterAppendExecStream, and LcsClusterReplaceExecStream.
Definition at line 39 of file ClosableObject.cpp.
References ClosableObject::closeImpl(), and ClosableObject::needsClose.
Referenced by CacheImpl< PageT, VictimPolicyT >::allocatePages(), LcsRowScanBaseExecStream::closeImpl(), ExecStreamGraphImpl::closeImpl(), FlatFileBuffer::open(), ClosableObjectDestructor::operator()(), and Segment::~Segment().
{
    if (!needsClose) {
        return;
    }
    needsClose = false;
    closeImpl();
}
void ThreadTracker::onThreadStart | ( | ) | [virtual, inherited] |
Called in new thread context before thread's body runs.
Reimplemented in JavaThreadTracker, and StatsTimer.
Definition at line 33 of file ThreadTracker.cpp.
Referenced by TimerThread::run(), and ThreadPoolBase::runPooledThread().
void ThreadTracker::onThreadEnd | ( | ) | [virtual, inherited] |
Called in thread context after thread's body runs.
Reimplemented in JavaThreadTracker, and StatsTimer.
Definition at line 38 of file ThreadTracker.cpp.
Referenced by TimerThread::run(), and ThreadPoolBase::runPooledThread().
FennelExcn * ThreadTracker::cloneExcn | ( | std::exception & | ex | ) | [virtual, inherited] |
Clones an exception so that it can be rethrown in a different thread context.
ex | the excn to be cloned |
Reimplemented in JavaThreadTracker.
Definition at line 43 of file ThreadTracker.cpp.
Referenced by JavaThreadTracker::cloneExcn(), and ParallelExecStreamScheduler::executeTask().
{
    return new FennelExcn(ex.what());
}
std::vector<SharedRandomAccessDevice> CacheImpl< PageT, VictimPolicyT >::deviceTable [private] |
Collection of registered devices indexed by DeviceId; this array is of fixed size, with a NULL slot indicating that the given device ID has not been registered; this permits synchronization-free access to the collection.
Definition at line 128 of file CacheImpl.h.
Referenced by CacheImpl< PageT, VictimPolicyT >::closeImpl().
PageBucketT CacheImpl< PageT, VictimPolicyT >::unmappedBucket [private] |
Bucket of free unmapped pages whose buffers are still allocated.
Definition at line 133 of file CacheImpl.h.
Referenced by CacheImpl< PageT, VictimPolicyT >::allocatePages(), and CacheImpl< PageT, VictimPolicyT >::closeImpl().
PageBucketT CacheImpl< PageT, VictimPolicyT >::unallocatedBucket [private] |
Bucket of free pages whose buffers are not allocated.
Definition at line 138 of file CacheImpl.h.
Referenced by CacheImpl< PageT, VictimPolicyT >::allocatePages(), CacheImpl< PageT, VictimPolicyT >::closeImpl(), CacheImpl< PageT, VictimPolicyT >::getAllocatedPageCount(), and CacheImpl< PageT, VictimPolicyT >::setAllocatedPageCount().
std::vector<PageBucketT *> CacheImpl< PageT, VictimPolicyT >::pageTable [private] |
Array of PageBuckets indexed by BlockId hash code.
This hash table is of fixed size, permitting synchronization-free access.
Definition at line 144 of file CacheImpl.h.
Referenced by CacheImpl< PageT, VictimPolicyT >::closeImpl().
uint CacheImpl< PageT, VictimPolicyT >::dirtyHighWaterPercent [private] |
Percentage of pages in the cache that must be dirty for lazy writes to be initiated.
Definition at line 150 of file CacheImpl.h.
Referenced by CacheImpl< PageT, VictimPolicyT >::calcDirtyThreshholds().
uint CacheImpl< PageT, VictimPolicyT >::dirtyLowWaterPercent [private] |
Percentage of pages in the cache that must be dirty for lazy writes to be suspended.
Definition at line 156 of file CacheImpl.h.
Referenced by CacheImpl< PageT, VictimPolicyT >::calcDirtyThreshholds().
uint CacheImpl< PageT, VictimPolicyT >::dirtyHighWaterMark [private] |
Number of dirty pages in the cache corresponding to the high-water percentage.
Definition at line 162 of file CacheImpl.h.
Referenced by CacheImpl< PageT, VictimPolicyT >::calcDirtyThreshholds().
uint CacheImpl< PageT, VictimPolicyT >::dirtyLowWaterMark [private] |
Number of dirty pages in the cache corresponding to the low-water percentage.
Definition at line 168 of file CacheImpl.h.
Referenced by CacheImpl< PageT, VictimPolicyT >::calcDirtyThreshholds().
bool CacheImpl< PageT, VictimPolicyT >::inFlushMode [private] |
Used by the lazy writer thread to indicate that the high-water dirty threshold has been reached and page flushes should continue until the low-water threshold is reached.
Definition at line 175 of file CacheImpl.h.
AtomicCounter CacheImpl< PageT, VictimPolicyT >::nCacheHits [private] |
AtomicCounter CacheImpl< PageT, VictimPolicyT >::nCacheRequests [private] |
AtomicCounter CacheImpl< PageT, VictimPolicyT >::nVictimizations [private] |
AtomicCounter CacheImpl< PageT, VictimPolicyT >::nDirtyPages [private] |
This is actually used for more than just statistics; the idle flush thread uses this in determining its activity.
Definition at line 197 of file CacheImpl.h.
AtomicCounter CacheImpl< PageT, VictimPolicyT >::nPageReads [private] |
AtomicCounter CacheImpl< PageT, VictimPolicyT >::nPageWrites [private] |
AtomicCounter CacheImpl< PageT, VictimPolicyT >::nRejectedCachePrefetches [private] |
AtomicCounter CacheImpl< PageT, VictimPolicyT >::nIoRetries [private] |
AtomicCounter CacheImpl< PageT, VictimPolicyT >::nSuccessfulCachePrefetches [private] |
AtomicCounter CacheImpl< PageT, VictimPolicyT >::nLazyWrites [private] |
AtomicCounter CacheImpl< PageT, VictimPolicyT >::nLazyWriteCalls [private] |
AtomicCounter CacheImpl< PageT, VictimPolicyT >::nVictimizationWrites [private] |
AtomicCounter CacheImpl< PageT, VictimPolicyT >::nCheckpointWrites [private] |
CacheStats CacheImpl< PageT, VictimPolicyT >::statsSinceInit [private] |
Accumulated state for all counters which are tracked since cache initialization.
Other fields are unused.
Definition at line 248 of file CacheImpl.h.
Referenced by CacheImpl< PageT, VictimPolicyT >::initializeStats().
StrictMutex CacheImpl< PageT, VictimPolicyT >::freePageMutex [private] |
LocalCondition CacheImpl< PageT, VictimPolicyT >::freePageCondition [private] |
Condition variable used for notification of free page availability.
Definition at line 258 of file CacheImpl.h.
std::vector<PageT *> CacheImpl< PageT, VictimPolicyT >::pages [private] |
A fixed-size vector of pointers to cache pages; we can get away with this because currently the number of pages is fixed at initialization.
This permits synchronization-free access to the collection.
Definition at line 265 of file CacheImpl.h.
Referenced by CacheImpl< PageT, VictimPolicyT >::allocatePages(), CacheImpl< PageT, VictimPolicyT >::closeImpl(), CacheImpl< PageT, VictimPolicyT >::getAllocatedPageCount(), CacheImpl< PageT, VictimPolicyT >::getMaxAllocatedPageCount(), and CacheImpl< PageT, VictimPolicyT >::setAllocatedPageCount().
DeviceAccessScheduler* CacheImpl< PageT, VictimPolicyT >::pDeviceAccessScheduler [private] |
Scheduler for asynchronous I/O.
Definition at line 270 of file CacheImpl.h.
Referenced by CacheImpl< PageT, VictimPolicyT >::closeImpl(), and CacheImpl< PageT, VictimPolicyT >::getDeviceAccessScheduler().
CacheAllocator& CacheImpl< PageT, VictimPolicyT >::bufferAllocator [private] |
Source of buffer memory.
Definition at line 275 of file CacheImpl.h.
Referenced by CacheImpl< PageT, VictimPolicyT >::allocatePages(), CacheImpl< PageT, VictimPolicyT >::closeImpl(), and CacheImpl< PageT, VictimPolicyT >::setAllocatedPageCount().
boost::scoped_ptr<CacheAllocator> CacheImpl< PageT, VictimPolicyT >::pBufferAllocator [private] |
VictimPolicyT CacheImpl< PageT, VictimPolicyT >::victimPolicy [private] |
The realization for the VictimPolicy model.
See LRUVictimPolicy for general information on the collaboration between CacheImpl and victimPolicy.
Definition at line 287 of file CacheImpl.h.
Referenced by CacheImpl< PageT, VictimPolicyT >::allocatePages(), CacheImpl< PageT, VictimPolicyT >::closeImpl(), and CacheImpl< PageT, VictimPolicyT >::setAllocatedPageCount().
TimerThread CacheImpl< PageT, VictimPolicyT >::timerThread [private] |
Thread for running idle flush.
Definition at line 292 of file CacheImpl.h.
Referenced by CacheImpl< PageT, VictimPolicyT >::closeImpl().
uint CacheImpl< PageT, VictimPolicyT >::idleFlushInterval [private] |
uint CacheImpl< PageT, VictimPolicyT >::prefetchPagesMax [private] |
uint CacheImpl< PageT, VictimPolicyT >::prefetchThrottleRate [private] |
The number of successful pre-fetches that have to occur before the pre-fetch rate is throttled back up, in the event that it has been throttled down due to rejected requests.
Definition at line 309 of file CacheImpl.h.
uint Cache::cbPage [protected, inherited] |
const DeviceId Cache::NULL_DEVICE_ID [static, inherited] |
The DeviceId assigned to the instance of RandomAccessNullDevice associated with every cache.
This device is automatically registered when the cache is opened and unregistered when the cache is closed. Scratch pages have this DeviceId in their BlockIds, but no real blocks can ever be mapped to this device.
Definition at line 74 of file Cache.h.
Referenced by CacheImpl< PageT, VictimPolicyT >::closeImpl(), CachePage::isScratchLocked(), ScratchSegment::lockPage(), and ScratchSegment::translatePageId().
bool ClosableObject::needsClose [protected, inherited] |
Definition at line 44 of file ClosableObject.h.
Referenced by SegStreamAllocation::beginWrite(), ExecStreamGraphImpl::clear(), ClosableObject::ClosableObject(), ClosableObject::close(), FlatFileBuffer::open(), ExecStreamGraphImpl::open(), ExecStream::open(), and ClosableObject::~ClosableObject().