#ifndef GCACHE_CONTROLLER_H
#define GCACHE_CONTROLLER_H

#include "cache.h"
/** Allows inserting tokens, updating priorities and generally controlling the cache. */
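/* Typical usage, a minimal sketch: MyToken is assumed to be a Token subclass and
   RamCache/GpuCache hypothetical Cache<MyToken> subclasses; my_tokens, rendering
   and redraw() are likewise placeholders, none of them defined in this header.

     Controller<MyToken> controller;
     RamCache ram;                          //lowest cache in the chain
     GpuCache gpu;                          //highest cache in the chain
     controller.addCache(&ram);             //add caches from lowest to highest
     controller.addCache(&gpu);
     controller.setMaxTokens(1000);
     controller.start();                    //spawn the cache threads

     for(unsigned int i = 0; i < my_tokens.size(); i++)
       controller.addToken(&my_tokens[i]);  //queued; actually inserted by updatePriorities()

     while(rendering) {
       //recompute token priorities here, then:
       controller.updatePriorities();       //push queued tokens and updated priorities
       if(controller.newData())
         redraw();                          //some cache received new data
     }
     controller.finish();                   //stop the threads and flush the caches
*/
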
template <class Token>
class Controller {
public:
  ///tokens waiting to be added (should be private)
  std::vector<Token *> tokens;

  ///threads are still running, but no door is open in the caches;
  ///transfers might still be going on!
  bool paused;

  ///all cache threads are stopped
  bool stopped;

public:
  ///should be protected
  Provider<Token> provider;
  ///should be protected
  std::vector<Cache<Token> *> caches;

  Controller(): paused(false), stopped(true) {}
  ~Controller() { if(!stopped) finish(); }

  ///called before the controller is started, to add a cache to the chain
  /** The order in which the caches are added is from the lowest to the highest. */
  void addCache(Cache<Token> *cache) {
    if(caches.size() == 0)
      cache->setInputCache(&provider);
    else
      cache->setInputCache(caches.back());
    assert(cache->input);
    caches.push_back(cache);
  }

  ///inserts the token in the cache if not already present (actual insertion is done in updatePriorities())
  bool addToken(Token *token) {
    if(token->count.testAndSetOrdered(Token::OUTSIDE, Token::CACHE)) {
      tokens.push_back(token);
      return true;
    }
    return false;
  }

  ///WARNING: might stall for the time needed to drop the tokens from the caches.
  ///FUNCTOR must provide bool operator()(Token *) and return true for the tokens to remove
  ///(see the sketch after this function).
  template<class FUNCTOR> void removeTokens(FUNCTOR functor) {
    pause(); //this might actually be unnecessary if you mark tokens to be removed
    for(int i = (int)caches.size()-1; i >= 0; i--)
      caches[i]->flush(functor);
    provider.flush(functor);
    resume();
  }
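
  /* Example functor, a sketch: it assumes the user's Token subclass (MyToken)
     exposes a 'to_remove' flag, which is not part of this header; the controller
     must be running, since removeTokens() pauses and resumes it.

       struct RemoveMarked {
         bool operator()(MyToken *t) { return t->to_remove; }
       };
       controller.removeTokens(RemoveMarked());
  */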

  ///if more than m tokens are present in the provider, the lowest priority ones will be removed
  void setMaxTokens(int m) {
    mt::mutexlocker l(&provider.heap_lock);
    provider.max_tokens = m;
  }

  ///ensures that added tokens are processed and that existing ones have their priority updated.
  ///potential bug: the update is done on the heaps, so if something is in transit...
  void updatePriorities() {
    if(tokens.size()) {
      mt::mutexlocker l(&provider.heap_lock);
      for(unsigned int i = 0; i < tokens.size(); i++)
        provider.heap.push(tokens[i]);
      tokens.clear();
    }

    provider.pushPriorities();
    for(unsigned int i = 0; i < caches.size(); i++)
      caches[i]->pushPriorities();
  }

  ///starts the various cache threads.
  void start() {
    if(!stopped) return;
    assert(!paused);
    assert(caches.size() > 1);
    caches.back()->final = true;
    for(unsigned int i = 0; i < caches.size(); i++) //cache 0 is a provider, and its thread is not running.
      caches[i]->start();
    stopped = false;
  }

  ///stops the cache threads
  void stop() {
    if(stopped) return;
    assert(!paused);

    //signal all caches to quit
    for(unsigned int i = 0; i < caches.size(); i++)
      caches[i]->quit = true;

    //abort current gets
    for(unsigned int i = 0; i < caches.size(); i++)
      caches[i]->abort();

    //make sure all caches actually run a cycle.
    for(unsigned int i = 0; i < caches.size(); i++)
      caches[i]->input->check_queue.open();

    for(unsigned int i = 0; i < caches.size(); i++)
      caches[i]->wait();

    stopped = true;
  }

  ///stops the cache threads and flushes all caches
  void finish() {
    stop();
    flush();
  }

  ///pauses the cache threads: no door is left open, but transfers might still be going on
  void pause() {
    assert(!stopped);
    assert(!paused);

    //lock all doors.
    for(unsigned int i = 0; i < caches.size(); i++)
      caches[i]->input->check_queue.lock();

    //abort all pending calls
    for(unsigned int i = 0; i < caches.size(); i++)
      caches[i]->abort();

    //make sure no cache is running (must be done after abort! otherwise we would have to wait for the get)
    for(unsigned int i = 0; i < caches.size()-1; i++)
      caches[i]->input->check_queue.room.lock();

    paused = true;
  }

  ///resumes the cache threads after a pause()
  void resume() {
    assert(!stopped);
    assert(paused);

    //unlock and open all doors
    for(unsigned int i = 0; i < caches.size(); i++) {
      caches[i]->input->check_queue.unlock();
      caches[i]->input->check_queue.open();
    }

    //allow all caches to enter again.
    for(unsigned int i = 0; i < caches.size()-1; i++)
      caches[i]->input->check_queue.room.unlock();

    paused = false;
  }
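
  /* A sketch of the intended pause()/resume() pattern (removeTokens() above uses
     the same bracketing); modifyTokensSomehow() is a hypothetical user function:

       controller.pause();        //no cache thread will start a new transfer
       modifyTokensSomehow();     //e.g. operate on the tokens, as removeTokens() does
       controller.resume();
  */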

  ///empties all caches AND REMOVES ALL TOKENS!
  void flush() {
    for(int i = (int)caches.size()-1; i >= 0; i--)
      caches[i]->flush();
    provider.heap.clear();
  }

  ///returns true if any cache reports new data
  bool newData() {
    bool c = false;
    for(int i = (int)caches.size() -1; i >= 0; i--) {
      c |= caches[i]->newData();
    }
    return c;
  }

  ///returns true only if every cache thread is waiting on its input queue
  bool isWaiting() {
    bool waiting = true;
    for(int i = (int)caches.size() -1; i >= 0; i--) {
      waiting &= caches[i]->input->check_queue.isWaiting();
    }
    return waiting;
  }
};

#endif // GCACHE_CONTROLLER_H