door closed by default and vcg:: namespace
This commit is contained in:
parent 75ebfa9d5a
commit 0d4a84f012
@@ -21,245 +21,253 @@ using namespace std;

/** Cache virtual base class. You are required to implement the pure virtual functions get, drop and size.
*/

namespace vcg {

template <typename Token> class Transfer;

template <typename Token>
class Cache: public Provider<Token> {

public:
    ///true if this is the last cache (the one we use the data from)
    bool final;
    //if true the cache will exit at the first opportunity
    bool quit;
    ///keeps track of changes (if 1 then something was loaded or dropped)
    mt::atomicInt new_data;
    ///callback for new_data
    void (*callback)(void *data);

    ///data is fetched from here
    Provider<Token> *input;

    ///threads running over cache...
    std::vector<Transfer<Token> *> transfers;

protected:
    ///max space available
    uint64_t s_max;
    ///current space used
    uint64_t s_curr;

public:
    Cache(uint64_t _capacity = INT_MAX):
        final(false), quit(false), new_data(false), input(NULL), s_max(_capacity), s_curr(0) {}
    virtual ~Cache() {}

    void setInputCache(Provider<Token> *p) { input = p; }
    uint64_t capacity() { return s_max; }
    uint64_t size() { return s_curr; }
    void setCapacity(uint64_t c) { s_max = c; }

    ///return true if the cache is waiting for priority to change
    bool newData() {
        bool r = new_data.testAndSetOrdered(1, 0); //if changed is 1, r is true
        return r;
    }

    ///empty the cache. Make sure no resource is locked before calling this.
    ///Requires a pause or stop first; ensure there is no locked item.
    void flush() {
        //std::vector<Token *> tokens;
        {
            for(int i = 0; i < this->heap.size(); i++) {
                Token *token = &(this->heap[i]);
                //tokens.push_back(token);
                s_curr -= drop(token);
                assert(!(token->count >= Token::LOCKED));
                if(final)
                    token->count.testAndSetOrdered(Token::READY, Token::CACHE);
                input->heap.push(token);
            }
            this->heap.clear();
        }
        if(s_curr != 0) {
            std::cerr << "Cache size after flush is not ZERO!\n";
            s_curr = 0;
        }
    }

    ///empty the cache of all tokens accepted by the functor. Make sure no resource is locked before calling this.
    ///Requires a pause or stop first; ensure there is no locked item.
    template <class FUNCTOR> void flush(FUNCTOR functor) {
        std::vector<Token *> tokens;
        {
            int count = 0;
            mt::mutexlocker locker(&(this->heap_lock));
            for(int k = 0; k < this->heap.size(); k++) {
                Token *token = &this->heap[k];
                if(functor(token)) { //drop it
                    tokens.push_back(token);
                    s_curr -= drop(token);
                    assert(token->count < Token::LOCKED);
                    if(final)
                        token->count.testAndSetOrdered(Token::READY, Token::CACHE);
                } else
                    this->heap.at(count++) = token;
            }
            this->heap.resize(count);
            this->heap_dirty = true;
        }
        {
            mt::mutexlocker locker(&(input->heap_lock));
            for(unsigned int i = 0; i < tokens.size(); i++) {
                input->heap.push(tokens[i]);
            }
        }
    }

    virtual void abort() {}

protected:
    ///return the space used in the cache by the loaded resource
    virtual int size(Token *token) = 0;
    ///return the amount of space used in the cache, or -1 for a failed transfer
    virtual int get(Token *token) = 0;
    ///return the amount removed
    virtual int drop(Token *token) = 0;
    ///make sure the get function does not access the token after abort has returned.

    ///called as the first thing in run()
    virtual void begin() {}
    virtual void middle() {}
    ///called as the last thing in run()
    virtual void end() {}

    ///[should be protected]
    void run() {
        assert(input);
        /* basic operation of the cache:
           1) transfer the first element of input_cache if
              the cache has room OR the first element in input has higher priority than the last element
           2) make room until eliminating an element would leave space. */
        begin();
        while(!this->quit) {
            input->check_queue.enter(true); //wait for the cache below to load something or priorities to change
            if(this->quit) break;

            middle();
            if(unload() || load()) {
                new_data.testAndSetOrdered(0, 1); //if not changed, set as changed
                input->check_queue.open(); //we signal ourselves to check again
            }
            input->check_queue.leave();
        }
        this->quit = false; //in case someone wants to restart
        end();
    }

    /** Checks whether we need to make room in the cache because of:
        size() - sizeof(lowest priority item) > capacity()
    **/
    bool unload() {
        Token *remove = NULL;
        //make room in the cache checking that:
        //1 we need to make room (capacity < current)
        if(size() > capacity()) {
            mt::mutexlocker locker(&(this->heap_lock));

            //2 we have some element not in the upper caches (heap.size() > 0)
            if(this->heap.size()) {
                Token &last = this->heap.min();
                int itemsize = size(&last);

                //3 after removing the item we are still full (avoids bouncing items)
                if(size() - itemsize > capacity()) {

                    //4 the item to remove is not locked (only in the last cache; you can't lock an object otherwise)
                    if(!final) { //not final, we can drop when we want
                        remove = this->heap.popMin();
                    } else {
                        last.count.testAndSetOrdered(Token::READY, Token::CACHE);
                        if(last.count <= Token::CACHE) { //was not locked and now can't be locked, remove it.
                            remove = this->heap.popMin();
                        } else { //last item is locked, need to reorder the stack
                            remove = this->heap.popMin();
                            this->heap.push(remove);
                            cout << "Reordering stack something (what?)\n";
                            return true;
                        }
                    }
                }
            }
        }

        if(remove) {
            {
                mt::mutexlocker input_locker(&(input->heap_lock));
                int size = drop(remove);
                assert(size >= 0);
                s_curr -= size;
                input->heap.push(remove);
            }
            cout << "Removing something (what?)\n";
            return true;
        }
        return false;
    }

    ///should be protected
    bool load() {
        Token *insert = NULL;
        Token *last = NULL; //we want to lock only one heap at a time to avoid deadlocks.

        /* check whether we have room (curr < capacity) or the heap is empty.
           an empty heap is bad: we cannot drop anything to make room, and the cache above has nothing to get.
           this should not happen if we set correct cache sizes, but if it happens.... */
        {
            mt::mutexlocker locker(&(this->heap_lock));
            this->rebuild();
            if(size() > capacity() && this->heap.size() > 0) {
                last = &(this->heap.min()); //no room, set last so we might check for a swap.
            }
        }

        {
            mt::mutexlocker input_locker(&(input->heap_lock));
            input->rebuild(); //if dirty rebuild
            if(input->heap.size()) { //we need something in input to transfer.
                Token &first = input->heap.max();
                if(first.count > Token::REMOVE &&
                   (!last || first.priority > last->priority)) { //if !last we already decided we want a transfer, otherwise check for a swap
                    insert = input->heap.popMax(); //remove the item from the heap while we transfer it.
                }
            }
        }

        if(insert) { //we want to fetch something

            int size = get(insert);

            if(size >= 0) { //success
                s_curr += size;
                {
                    mt::mutexlocker locker(&(this->heap_lock));
                    if(final)
                        insert->count.ref(); //now lock is 0 and can be locked

                    this->heap.push(insert);
                }
                this->check_queue.open(); //we should signal the parent cache that we have a new item
                return true;

            } else { //failed transfer, put it back; we will keep trying to transfer it...
                mt::mutexlocker input_locker(&(input->heap_lock));
                input->heap.push(insert);
                return false;
            }
        }
        return false;
    }
};

} //namespace

/* TODO use the following class to allow multiple cache transfers at the same time */

/*
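The class above only does the bookkeeping; a concrete cache has to supply the three pure virtuals get, drop and size. A minimal sketch of what such a subclass might look like follows. MyToken, RamCache, RESOURCE_SIZE and the header names are illustrative assumptions, not part of this commit, and the exact Token constructor interface may differ from what is assumed here.

// Illustrative sketch only: names and headers are assumptions; only the
// size/get/drop pure virtuals and the setInputCache/final members come from the diff above.
#include <cstring>
#include "token.h"   // assumed header providing vcg::Token
#include "cache.h"   // assumed header providing vcg::Cache

// A token whose Priority is a plain float, carrying a pointer to the loaded data.
class MyToken: public vcg::Token<float> {
public:
    int id;        // which resource this token refers to
    char *ram;     // owned by the cache while the token is loaded
    MyToken(int i = 0): id(i), ram(NULL) {}
};

// A cache keeping resources in RAM: get() loads, drop() frees, size() reports usage.
class RamCache: public vcg::Cache<MyToken> {
public:
    static const int RESOURCE_SIZE = 1<<20;        // pretend every resource is 1 MB

    RamCache(uint64_t cap): vcg::Cache<MyToken>(cap) {}

protected:
    int size(MyToken *token) {                     // space currently used by this token
        return token->ram ? RESOURCE_SIZE : 0;
    }
    int get(MyToken *token) {                      // load; return space used, -1 on failure
        token->ram = new char[RESOURCE_SIZE];
        memset(token->ram, 0, RESOURCE_SIZE);      // stand-in for reading from disk or network
        return RESOURCE_SIZE;
    }
    int drop(MyToken *token) {                     // unload; return space released
        if(!token->ram) return 0;
        delete []token->ram;
        token->ram = NULL;
        return RESOURCE_SIZE;
    }
};

Two such caches could then be chained through the members shown in the diff, for example upper.setInputCache(&lower) together with upper.final = true, so that the last cache draws its tokens from the one below it.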
@@ -5,6 +5,7 @@

/** Allows to insert tokens, update priorities and generally control the cache.
*/

namespace vcg {

template <class Token>
class Controller {
@@ -66,7 +67,6 @@ class Controller {

    ///ensure that added tokens are processed and existing ones have their priority updated.
    ///potential bug! update is done on the heaps, if something is in transit...
    void updatePriorities() {
        if(tokens.size()) {
            mt::mutexlocker l(&provider.heap_lock);
            for(unsigned int i = 0; i < tokens.size(); i++)
@@ -140,6 +140,7 @@ class Controller {

    void resume() {
        assert(!stopped);
        assert(paused);
        cout << "Resume" << endl;

        //unlock and open all doors
        for(unsigned int i = 0; i < caches.size(); i++) {
@@ -177,5 +178,5 @@ class Controller {

    }
};

} //namespace
#endif // CONTROLLER_H
@@ -17,6 +17,8 @@

    You should never interact with this class.
*/

namespace vcg {

template <typename Token>
class Provider: public mt::thread {
public:
@@ -76,5 +78,5 @@ class Provider: public mt::thread {

    }
};

} //namespace
#endif
@@ -15,6 +15,7 @@

    The Priority template argument can simply be a floating point number
    or something more complex, (frame and error in pixel); the only
    requirement is the existence of a < comparison operator */

namespace vcg {

template <typename Priority>
class Token {
@@ -83,4 +84,5 @@ class Token {

    }
};

} //namespace
#endif // GCACHE_H
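As the Token comment above notes, the Priority template argument only needs a < comparison operator. A small hedged illustration of a compound priority (frame index plus error in pixels, as the comment suggests); the struct name and its fields are illustrative and not part of this commit:

// Illustrative only: a priority ordered first by frame, then by screen-space error.
struct FramePriority {
    int frame;      // frame at which the token was last requested
    float error;    // error in pixels: larger error means more urgent
    FramePriority(int f = 0, float e = 0.0f): frame(f), error(e) {}
    bool operator<(const FramePriority &other) const {
        if(frame != other.frame) return frame < other.frame;  // older frame, lower priority
        return error < other.error;                           // smaller error, lower priority
    }
};

// A token using it would then be declared as vcg::Token<FramePriority>.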