@@ -146,7 +146,7 @@ Log::allocateHead()
     // if you don't know why this is bad.
     LogDigest::SegmentId newHeadId = allocateSegmentId();
 
-    boost::lock_guard<SpinLock> lock(listLock);
+    std::lock_guard<SpinLock> lock(listLock);
 
     if (head != NULL)
         cleanableNewList.push_back(*head);
@@ -207,7 +207,7 @@ Log::allocateHead()
 bool
 Log::isSegmentLive(uint64_t segmentId)
 {
-    boost::lock_guard<SpinLock> lock(listLock);
+    std::lock_guard<SpinLock> lock(listLock);
     return (activeIdMap.find(segmentId) != activeIdMap.end());
 }
 
@@ -504,7 +504,7 @@ void *
 Log::getSegmentMemoryForCleaning(bool useEmergencyReserve)
 {
     if (useEmergencyReserve) {
-        boost::lock_guard<SpinLock> lock(listLock);
+        std::lock_guard<SpinLock> lock(listLock);
 
         if (emergencyCleanerList.empty())
             return NULL;
@@ -523,7 +523,7 @@ Log::getSegmentMemoryForCleaning(bool useEmergencyReserve)
 size_t
 Log::freeListCount()
 {
-    boost::lock_guard<SpinLock> lock(listLock);
+    std::lock_guard<SpinLock> lock(listLock);
 
     // We always save one for the next Log head, so adjust accordingly.
     return (freeList.size() > 0) ? freeList.size() - 1 : freeList.size();
@@ -540,7 +540,7 @@ Log::freeListCount()
 void
 Log::getNewCleanableSegments(SegmentVector& out)
 {
-    boost::lock_guard<SpinLock> lock(listLock);
+    std::lock_guard<SpinLock> lock(listLock);
 
     while (!cleanableNewList.empty()) {
         Segment& s = cleanableNewList.front();
@@ -564,7 +564,7 @@ Log::getNewCleanableSegments(SegmentVector& out)
 void
 Log::cleaningInto(Segment* segment)
 {
-    boost::lock_guard<SpinLock> lock(listLock);
+    std::lock_guard<SpinLock> lock(listLock);
 
     cleaningIntoList.push_back(*segment);
     activeIdMap[segment->getId()] = segment;
@@ -592,7 +592,7 @@
 Log::cleaningComplete(SegmentVector& clean,
                       std::vector<void *>& unusedSegmentMemory)
 {
-    boost::lock_guard<SpinLock> lock(listLock);
+    std::lock_guard<SpinLock> lock(listLock);
     bool change = false;
 
     // Return any unused segment memory the cleaner ended up
@@ -673,7 +673,7 @@ uint64_t
 Log::allocateSegmentId()
 {
     // XXX- could just be an atomic op
-    boost::lock_guard<SpinLock> lock(listLock);
+    std::lock_guard<SpinLock> lock(listLock);
     return nextSegmentId++;
 }
 
@@ -792,7 +792,7 @@ Log::locklessAddToFreeList(void *p)
 void *
 Log::getFromFreeList(bool mayUseLastSegment)
 {
-    boost::lock_guard<SpinLock> lock(listLock);
+    std::lock_guard<SpinLock> lock(listLock);
 
     if (freeList.empty())
         throw LogOutOfMemoryException(HERE, "Log is out of space");
@@ -837,7 +837,7 @@ Log::getSegmentFromAddress(const void* address)
 {
     const void *base = Segment::getSegmentBaseAddress(address, segmentCapacity);
 
-    boost::lock_guard<SpinLock> lock(listLock);
+    std::lock_guard<SpinLock> lock(listLock);
 
     if (head != NULL && base == head->getBaseAddress())
        return head;
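
Note on the change: this swap is a drop-in replacement because std::lock_guard, like boost::lock_guard, is parameterized over any BasicLockable type, i.e. anything exposing lock() and unlock() members, so the existing SpinLock can be guarded unchanged. The sketch below illustrates the idea only; the SpinLock and allocateSegmentId() shown are hypothetical stand-ins reduced to their locking shape, not this project's actual implementations.

// Minimal sketch, assuming a C++11-capable compiler.
#include <atomic>
#include <mutex>    // std::lock_guard

class SpinLock {
  public:
    void lock() {
        // Spin until the flag is acquired.
        while (flag.test_and_set(std::memory_order_acquire)) {
            // busy-wait
        }
    }
    void unlock() {
        flag.clear(std::memory_order_release);
    }
  private:
    std::atomic_flag flag = ATOMIC_FLAG_INIT;
};

SpinLock listLock;
uint64_t nextSegmentId = 0;

uint64_t
allocateSegmentId()
{
    std::lock_guard<SpinLock> lock(listLock);   // released automatically at scope exit
    return nextSegmentId++;
}

Because std::lock_guard only ever calls lock() in its constructor and unlock() in its destructor, no other changes to SpinLock are needed when dropping the Boost dependency.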