Hi, I wrote a simple program that simply generates tags and processes them (attached below). However, its performance degrades when I increase the number of threads. I would very much appreciate any insight as to how/why to address the scalability issue. Thanks, Sandeep //Stack.cnc <int l1stack>; <int l2stack>; <l1stack> :: (l1compute); <l2stack> :: (l2compute); env -> <l1stack>; (l1compute) -> <l2stack>;
<int l1stack>; <int l2stack>;
<l1stack> :: (l1compute); <l2stack> :: (l2compute);
env -> <l1stack>; (l1compute) -> <l2stack>;
//Stack.cpp
#include <ctime>
#include <iostream>
#include "stack.h"
stack_context c;
int ctr = 0;
// Create an instance of the context class which defines the graph
int main(int argc, char** argv)
{
clock_t start, end;
double elapsed;
start = clock();
for(int j = 0; j < 4; ++j)
{
for(int i = 0; i < 3000000; ++i)
{
c.l1stack.put(j*3000000+ i);
}
}
elapsed = ((double) (end-start))/CLOCKS_PER_SEC;
c.wait();
end = clock();
elapsed = ((double) (end-start))/CLOCKS_PER_SEC;
std::cout<<"Elapsed "<
}
int l1compute::execute(const int & t, stack_context & c ) const
{
c.l2stack.put(t);
return CnC::CNC_Success;
}
int l2compute::execute(const int & t, stack_context & c ) const
{
return CnC::CNC_Success;
}