Merge branch 'alternate_multithreaded'

This commit is contained in:
2025-09-10 08:35:40 -05:00
5 changed files with 448 additions and 181 deletions

View File

@@ -2,13 +2,24 @@
recent_file: path: "inc/genetic.h"
recent_file: path: "inc/sync.h"
recent_file: path: "d:/os/obj/amd64fre/minkernel/crts/ucrt/src/appcrt/startup/mt/objfre/amd64/minkernel/crts/ucrt/src/appcrt/startup/abort.cpp"
recent_file: path: "src/main.cpp"
recent_file: path: "d:/os/obj/amd64fre/minkernel/crts/ucrt/src/appcrt/startup/mt/objfre/amd64/minkernel/crts/ucrt/src/appcrt/startup/assert.cpp"
recent_file: path: "../../../../../Program Files/Microsoft Visual Studio/2022/Community/VC/Tools/MSVC/14.44.35207/include/vector"
recent_file: path: "d:/os/obj/amd64fre/minkernel/crts/ucrt/src/appcrt/misc/mt/objfre/amd64/minkernel/crts/ucrt/src/appcrt/misc/invalid_parameter.cpp"
recent_file: path: "../../../../../Program Files/Microsoft Visual Studio/2022/Community/VC/Tools/MSVC/14.44.35207/include/xmemory"
recent_file: path: "../../../../../Program Files/Microsoft Visual Studio/2022/Community/VC/Tools/MSVC/14.42.34433/include/algorithm"
recent_file: path: "../../../../../Program Files/Microsoft Visual Studio/2022/Community/VC/Tools/MSVC/14.42.34433/include/xutility"
recent_file: path: "src/main.cpp"
target:
{
executable: "bin/main.exe"
working_directory: bin
label: main
enabled: 1
arguments: 1
}
breakpoint:
{
source_location: "inc/genetic.h:292:1"
hit_count: 1
}

View File

@@ -1,9 +1,10 @@
#pragma once
#include <algorithm>
#include <cfloat>
#include <cstdlib>
#include <vector>
#include "util.h"
#include "sync.h"
#include "rand.h"
@@ -12,21 +13,31 @@ using namespace std;
namespace genetic {
template <class T> struct Array;
template <class T> struct Stats;
template <class T> struct Strategy;
struct CellTracker;
template <class T> Stats<T> run(Strategy<T>);
template <class T> T run(Strategy<T>);
template <class T> struct Strategy {
// Number of worker threads that will be evaluating cell fitness
int num_threads;
int batch_size; // Number of cells a worker thread tries to work on in a row
// before accessing/locking the work queue again.
int num_cells; // Size of the population pool
int num_generations; // Number of times (epochs) to run the algorithm
// Period of print statements (in seconds)
float stats_print_period_s;
// Size of the population pool per sim thread
int num_cells_per_thread;
// Number of times (epochs) to run the algorithm
int num_generations;
// Each thread will integrate the best globally performing cell
bool share_breakthroughs;
// How many generations to explore before resyncing with the global best
int share_breakthrough_gen_period;
bool test_all; // Sets whether or not every cell's fitness is evaluated every
// generation
float test_chance; // Chance to test any given cell's fitness. Relevant only
@@ -59,10 +70,16 @@ template <class T> struct Strategy {
};
template<class T> struct Stats {
std::vector<T> best_cell;
std::vector<float> best_cell_fitness;
TimeSpan setup_time;
TimeSpan run_time;
DynArray<T> best_cells;
DynArray<float> best_cell_fitness;
int gen;
bool done;
DynArray<TimeSpan> gen_time;
DynArray<TimeSpan> crossover_time;
DynArray<TimeSpan> mutate_time;
DynArray<TimeSpan> fitness_time;
DynArray<TimeSpan> sorting_time;
Mutex m;
};
struct CellTracker {
@@ -70,150 +87,81 @@ struct CellTracker {
int cellid;
};
// Fixed-length view over a raw heap buffer of T.
// Ownership is manual: whoever allocated data must free() it.
template <class T> struct Array {
  T *data;
  int len;
  T &operator[](int i) { return data[i]; }
};
// Allocate an Array<T> with room for len elements (contents uninitialized).
template <class T> Array<T> make_array(int len) {
  Array<T> result;
  result.data = static_cast<T *>(malloc(sizeof(T) * len));
  result.len = len;
  return result;
}
// Work item: apply the strategy's mutate() to one cell, in place.
template<class T>
struct MutateJob {
T* cell; // points into the worker's population array
};
// Work item: breed the children from the parents. Both arrays hold
// pointers into the population; the children are overwritten.
template<class T>
struct CrossoverJob {
Array<T*> parents;
Array<T*> children;
};
// Work item: evaluate one cell's fitness and record the score on its tracker.
template<class T>
struct FitnessJob {
T* cell;
CellTracker* track; // receives the computed score
};
// Discriminator for the Job union below (see TaggedJob).
enum class JobType {
MUTATE,
CROSSOVER,
FITNESS
};
// Untagged union of the three job payloads. Which member is active is
// recorded externally by a JobType tag (see TaggedJob).
template<class T>
union Job {
MutateJob<T> m;
CrossoverJob<T> c;
FitnessJob<T> f;
};
// Hand-rolled tagged union. (Author's note: std::variant exists; this
// explicit form is preferred here.)
template<class T>
struct TaggedJob {
Job<T> data; // payload; the active member is the one named by `type`
JobType type;
};
// Shared job queue guarded by a single mutex. Jobs are appended at
// write_i and consumed in batches starting at read_i.
template<class T>
struct WorkQueue {
Array<TaggedJob<T>> jobs; // fixed-capacity backing storage
int read_i, write_i, batch_size;
bool done_writing, work_complete, stop; // These catch some edge conditions: no-more-jobs, all-consumed, and shutdown respectively
Mutex m; // guards every field in this struct
ConditionVar done; // signaled when queued work has all been completed
ConditionVar jobs_ready; // signaled when new jobs become available
};
// Everything a worker thread needs, handed to worker<T> as its LPVOID arg.
template<class T>
struct WorkerThreadArgs {
WorkQueue<T> &q; // NOTE(review): q and s appear vestigial/unused by worker<T> — confirm before removing
Strategy<T> &s;
Strategy<T> strat; // worker's own copy of the strategy
Array<T> cells; // this worker's private population
Array<CellTracker> trackers; // score/cellid pairs parallel to cells
Stats<T> *stats; // per-thread stats block, also read by the stats thread
Mutex m; // guards the best_global_* pair below
float *best_global_score; // shared across all workers
T* best_global_cell; // shared across all workers
};
// Allocate a queue able to hold len jobs, consumed batch_size at a time.
template<class T> WorkQueue<T> make_work_queue(int len, int batch_size);
// Try to claim up to len jobs from q; out_batch_is_end marks the final batch.
template<class T> bool tryget_job_batch(WorkQueue<T> &q, int len, Array<TaggedJob<T>>* out_batch, bool* out_batch_is_end);
// Resolve a tracker to the cell it scores.
template<class T> T* _cellp(Array<T> cells, CellTracker tracker) { return &cells[tracker.cellid]; }
// Worker thread entry point (Win32 thread signature).
template<class T>
DWORD worker(LPVOID args);
template <class T> DWORD worker(LPVOID args) {
// Unpack everything...
WorkerThreadArgs<T>* worker_args = static_cast<WorkerThreadArgs<T>*>(args);
Strategy<T> strat = worker_args->strat;
Array<T> cells = worker_args->cells;
Array<CellTracker> trackers = worker_args->trackers;
Stats<T> &stats = *worker_args->stats;
float* best_global_score = worker_args->best_global_score;
T* best_global_cell = worker_args->best_global_cell;
Mutex best_m = worker_args->m;
template <class T> Stats<T> run(Strategy<T> strat) {
Stats<T> stats;
// ************* SETUP **************
TimeSpan start_setup = now();
// Create cells
Array<T> cells = make_array<T>(strat.num_cells);
for (int i = 0; i < cells.len; i++) cells[i] = strat.make_default_cell();
// Create cell trackers
Array<CellTracker> trackers = make_array<CellTracker>(strat.num_cells);
for (int i = 0; i < trackers.len; i++) trackers[i] = { .score=0, .cellid=i };
// Create work queue
// Worst case size is every cell mutated, crossed, and evaluated...? Not quite, but 3x should be upper bound
WorkQueue<T> q = make_work_queue<T>(3*strat.num_cells, strat.batch_size);
WorkerThreadArgs<T> args = {q, strat};
// Create worker threads
Thread *threads = (Thread*)malloc(sizeof(Thread*)*strat.num_threads);
for (int i = 0; i < strat.num_threads; i++) {
threads[i] = make_thread(worker<T>, &args);
}
stats.setup_time = now() - start_setup;
// *********** ALGORITHM ************
TimeSpan start_algo = now();
for (int gen = 0; gen < strat.num_generations; gen++) {
// Reset work queue
lock(q.m);
q.read_i = 0;
q.write_i = 0;
q.work_complete = false;
q.done_writing = false;
unlock(q.m);
// 1. mutate
for (int i = 0; i < trackers.len; i++) {
if (abs(norm_rand(strat.rand_seed)) < strat.mutation_chance) {
MutateJob<T> mj = {&cells[trackers[i].cellid]};
TaggedJob<T> job;
job.data.m = mj;
job.type=JobType::MUTATE;
q.jobs[q.write_i++] = job;
}
}
wake_all(q.jobs_ready); // There are available jobs for the worker threads!
// 2. crossover
if (strat.enable_crossover) {
// Prepare crossover operations as these will be the same every time except
// for the exact cell pointers
int npar = strat.crossover_parent_num;
int nchild = strat.crossover_children_num;
Array<T*> parents = make_array<T*>(npar);
Array<T*> children = make_array<T*>(nchild);
bool gt = strat.higher_fitness_is_better; // Writing strat.higher... is annoying
// printf("Core: %d\n", get_affinity());
TimeSpan start, diff, gen_start;
while(stats.gen < strat.num_generations) {
gen_start = now();
// 0. Share/Integrate global breakthrough
if (strat.share_breakthroughs && (stats.gen + get_affinity()) % strat.share_breakthrough_gen_period) {
lock(best_m);
if (better(gt, front(trackers).score, *best_global_score) != *best_global_score) {
// Share
*best_global_cell = *_cellp(cells, trackers[0]);
*best_global_score = trackers[0].score;
} else {
// Integrate
*_cellp(cells, trackers[0]) = *best_global_cell;
trackers[0].score = *best_global_score;
}
unlock(best_m);
}
// 1. crossover
start = now();
if (strat.enable_crossover) {
int parent_end = npar;
int child_begin = trackers.len-nchild;
while (parent_end <= child_begin) {
// TODO: Variable size arrays please. This is ridiculous.
Array<T*> parents = make_array<T*>(npar);
Array<T*> children = make_array<T*>(nchild);
// Get pointers to all the parent cells
for (int i = parent_end-npar; i < parent_end; i++) {
parents[i - (parent_end-npar)] = &cells[trackers[i].cellid];
T* cell = _cellp(cells, trackers[i]);
assert(cell != NULL);
parents[i - (parent_end-npar)] = cell;
}
// Get pointers to all the child cells (these will be overwritten)
for (int i = child_begin; i < child_begin+nchild; i++) {
children[i-child_begin] = &cells[trackers[i].cellid];
T* cell = _cellp(cells, trackers[i]);
assert(cell != NULL);
children[i-child_begin] = cell;
}
CrossoverJob<T> cj = {parents, children};
TaggedJob<T> job;
@@ -223,10 +171,25 @@ template <class T> Stats<T> run(Strategy<T> strat) {
parent_end += strat.crossover_parent_stride;
child_begin -= nchild;
}
wake_all(q.jobs_ready); // There are available jobs for the worker threads!
}
lock(stats.m);
append(stats.crossover_time, now() - start);
unlock(stats.m);
// 2. mutate
start = now();
for (int i = 0; i < trackers.len; i++) {
if (abs(norm_rand(strat.rand_seed)) < strat.mutation_chance) {
strat.mutate(cells[trackers[i].cellid]);
}
}
lock(stats.m);
append(stats.mutate_time, now() - start);
unlock(stats.m);
// 3. evaluate
start = now();
if (strat.test_all) {
for (int i = 0; i < trackers.len; i++) {
FitnessJob<T> fj = {&cells[trackers[i].cellid], &trackers[i]};
@@ -251,29 +214,147 @@ template <class T> Stats<T> run(Strategy<T> strat) {
q.done_writing = true;
unlock(q.m);
}
wake_all(q.jobs_ready);
// Wait until the work is finished
lock(q.m);
if (!q.work_complete)
wait(q.done, q.m, infinite_ts);
unlock(q.m);
lock(stats.m);
append(stats.fitness_time, now() - start);
unlock(stats.m);
// 4. sort
std::sort(&trackers[0], &trackers[trackers.len-1], [strat](CellTracker &a, CellTracker &b){ return strat.higher_fitness_is_better ? a.score > b.score : a.score < b.score; });
start = now();
std::sort(&trackers[0], &trackers[trackers.len-1], [strat](CellTracker &a, CellTracker &b){ return better(strat.higher_fitness_is_better, a.score, b.score) == a.score; });
lock(stats.m);
append(stats.sorting_time, now() - start);
printf("Gen: %d, Best Score: %f\n", gen, trackers[0].score);
stats.best_cell.push_back(cells[trackers[0].cellid]);
stats.best_cell_fitness.push_back(trackers[0].score);
append(stats.best_cells, cells[trackers[0].cellid]);
append(stats.best_cell_fitness, trackers[0].score);
append(stats.gen_time, now() - gen_start);
stats.gen++;
unlock(stats.m);
}
stats.done = true;
return 0;
}
template <class T> T run(Strategy<T> strat) {
Array<Stats<T>> stats = make_array<Stats<T>>(strat.num_threads);
Array<Thread> threads = make_array<Thread>(strat.num_threads);
Array<WorkerThreadArgs<T>> args = make_array<WorkerThreadArgs<T>>(strat.num_threads);
float best_global_score = strat.higher_fitness_is_better ? FLT_MIN : FLT_MAX;
T best_global_cell;
allow_all_processors();
set_affinity(0);
for (int i = 0; i < strat.num_threads; i++) {
stats[i] = {
.best_cells=make_dynarray<T>(strat.num_generations),
.best_cell_fitness=make_dynarray<float>(strat.num_generations),
.gen_time=make_dynarray<TimeSpan>(strat.num_generations),
.crossover_time=make_dynarray<TimeSpan>(strat.num_generations),
.mutate_time=make_dynarray<TimeSpan>(strat.num_generations),
.fitness_time=make_dynarray<TimeSpan>(strat.num_generations),
.sorting_time=make_dynarray<TimeSpan>(strat.num_generations),
.m=make_mutex()
};
Array<T> cells = make_array<T>(strat.num_threads*strat.num_cells_per_thread);
Array<CellTracker> trackers = make_array<CellTracker>(strat.num_cells_per_thread);
for (int i = 0; i < strat.num_cells_per_thread; i++) {
cells[i] = strat.make_default_cell();
trackers[i] = {0, i};
}
q.stop = true;
wake_all(q.jobs_ready);
// TODO: join all threads
args[i].strat=strat;
args[i].cells=cells;
args[i].trackers=trackers;
args[i].stats=&stats[i];
args[i].best_global_score=&best_global_score;
args[i].best_global_cell=&best_global_cell;
args[i].m = make_mutex();
// TODO: There's some data freeing that should really be done here
stats.run_time = now() - start_algo;
return stats;
threads[i] = make_thread(worker<T>, &args[i], i+1);
}
// We are the stats thread
bool complete = false;
while (!complete) {
sleep(from_s(strat.stats_print_period_s));
printf("**********************\n");
float g_avg_gen_time = 0;
float g_avg_crossover_time = 0;
float g_avg_mutate_time = 0;
float g_avg_fitness_time = 0;
float g_avg_sorting_time = 0;
float g_avg_overhead_time = 0;
float g_progress_per = 0;
float g_best_fitness = strat.higher_fitness_is_better ? FLT_MIN : FLT_MAX;
complete = true;
for (int i = 0; i < stats.len; i++) {
lock(stats[i].m);
complete &= stats[i].done;
int end = stats[i].gen_time.end-1;
float gen_time = to_s(stats[i].gen_time[end]);
float crossover_time = to_s(stats[i].crossover_time[end]);
float mutate_time = to_s(stats[i].mutate_time[end]);
float fitness_time = to_s(stats[i].fitness_time[end]);
float sorting_time = to_s(stats[i].sorting_time[end]);
float progress_per = static_cast<float>(stats[i].gen) / static_cast<float>(strat.num_generations) * 100;
float best_score = back(stats[i].best_cell_fitness);
float overhead = max(0, gen_time - (crossover_time + mutate_time + fitness_time + sorting_time));
float overhead_per = overhead / gen_time * 100;
g_avg_gen_time += gen_time;
g_avg_crossover_time += crossover_time;
g_avg_mutate_time += mutate_time;
g_avg_fitness_time += fitness_time;
g_avg_sorting_time += sorting_time;
g_progress_per += progress_per;
g_best_fitness = better(strat.higher_fitness_is_better, best_score, g_best_fitness);
g_avg_overhead_time += overhead;
printf("%d, Progress %d/%d, Top: %.5e, Overhead Per: %.4f%%, Gen: %.4f, Overhead: %.4f, Cross: %.4f (s), Mutate: %.4f (s), Fitness: %.4f (s), Sorting: %.4f (s)\n", i, stats[i].gen, strat.num_generations, best_score, overhead_per, gen_time, overhead, crossover_time, mutate_time, fitness_time, sorting_time);
unlock(stats[i].m);
}
g_avg_gen_time /= stats.len;
g_avg_crossover_time /= stats.len;
g_avg_mutate_time /= stats.len;
g_avg_fitness_time /= stats.len;
g_avg_sorting_time /= stats.len;
g_progress_per /= stats.len;
g_avg_overhead_time /= stats.len;
float g_avg_overhead_per = g_avg_overhead_time / g_avg_gen_time * 100;
printf("GLOBAL, Progress %.1f%%, Top: %.5e, Overhead Per: %.4f%%, Gen: %.4f, Overhead: %.4f, Cross: %.4f (s), Mutate: %.4f (s), Fitness: %.4f (s), Sorting: %.4f (s)\n", g_progress_per, g_best_fitness, g_avg_overhead_per, g_avg_gen_time, g_avg_overhead_time, g_avg_crossover_time, g_avg_mutate_time, g_avg_fitness_time, g_avg_sorting_time);
if (complete) break;
}
for (int i = 0; i < threads.len; i++) {
join(threads[i]);
}
T best_cell;
// TODO: bad
float best_score = strat.higher_fitness_is_better ? FLT_MIN : FLT_MAX;
for (int i = 0; i < stats.len; i++) {
float score = back(stats[i].best_cell_fitness);
if (strat.higher_fitness_is_better ? score > best_score : score < best_score) {
best_cell = back(stats[i].best_cells);
best_score = score;
}
}
return best_cell;
}
template<class T> WorkQueue<T> make_work_queue(int len, int batch_size) {

View File

@@ -1,7 +1,11 @@
#pragma once
#include <cassert>
#include <cstdint>
#include <cstdio>
#ifdef _WIN32
#include "windows.h"
#include <windows.h>
#endif
namespace sync {
@@ -17,6 +21,14 @@ typedef LPVOID ThreadArg;
const TimeSpan infinite_ts = { .QuadPart=LLONG_MAX };
// Number of logical processors reported by the OS (Win32).
int get_num_cores() {
SYSTEM_INFO sysinfo;
GetSystemInfo(&sysinfo);
return sysinfo.dwNumberOfProcessors;
}
const int num_cores = get_num_cores();
LARGE_INTEGER _init_freq() {
LARGE_INTEGER freq;
QueryPerformanceFrequency(&freq);
@@ -27,7 +39,13 @@ static LARGE_INTEGER freq = _init_freq();
#endif
Thread make_thread(ThreadFunc t, ThreadArg a);
Thread make_thread(ThreadFunc t, ThreadArg a, int core_affinity);
void join(Thread t);
void sleep(TimeSpan ts);
void allow_all_processors();
void set_affinity(Thread &t, int core);
void set_affinity(int core);
int get_affinity();
Mutex make_mutex();
void lock(Mutex &m);
@@ -52,23 +70,106 @@ TimeSpan from_min(double minutes);
TimeSpan from_hours(double hours);
TimeSpan now();
TimeSpan operator-(const TimeSpan &a, const TimeSpan &b);
TimeSpan operator+(const TimeSpan &a, const TimeSpan &b);
TimeSpan operator*(const TimeSpan &a, const TimeSpan &b);
TimeSpan operator/(const TimeSpan &a, const TimeSpan &b);
double to_ms(TimeSpan &sp);
double to_s(TimeSpan &sp);
double to_min(TimeSpan &sp);
double to_hours(TimeSpan &sp);
double to_ms(TimeSpan &ts);
double to_s(TimeSpan &ts);
double to_min(TimeSpan &ts);
double to_hours(TimeSpan &ts);
#ifdef _WIN32
// Mask with the low n bits set. n >= 64 saturates to all ones: the old
// `if (n == 64)` test left 65+ hitting a shift-by->=64, which is undefined
// behavior — and callers pass num_cores, which can exceed 64.
uint64_t bitmask (unsigned short n) {
  if (n >= 64) return ~((uint64_t)0);
  return (((uint64_t) 1) << n) - 1;
}
// Lookup table mapping the isolated top bit (after the de Bruijn-style
// multiply in log2_64) back to its bit index.
const int tab64[64] = {
  63,  0, 58,  1, 59, 47, 53,  2,
  60, 39, 48, 27, 54, 33, 42,  3,
  61, 51, 37, 40, 49, 18, 28, 20,
  55, 30, 34, 11, 43, 14, 22,  4,
  62, 57, 46, 52, 38, 26, 32, 41,
  50, 36, 17, 19, 29, 10, 13, 21,
  56, 45, 25, 31, 35, 16,  9, 12,
  44, 24, 15,  8, 23,  7,  6,  5};
// Floor of log2 of a 64-bit value (result is unspecified for value == 0).
int log2_64 (uint64_t value)
{
  // Smear the highest set bit into every lower position.
  for (int shift = 1; shift <= 32; shift <<= 1)
    value |= value >> shift;
  // value - (value >> 1) isolates the top bit; the multiply-and-shift
  // hashes it to a unique table slot.
  return tab64[((uint64_t)((value - (value >> 1))*0x07EDD5E59A4E28C2)) >> 58];
}
// Spawn a thread running f(a) with a default stack and no affinity pinning.
// Returns the thread handle (NULL on failure, per CreateThread).
Thread make_thread(ThreadFunc f, ThreadArg a) {
DWORD tid;
return CreateThread(NULL, 0, f, a, 0, &tid);
}
// Heap-allocated bundle handed to _dummy_thread so a freshly spawned thread
// can pin itself to a core before running the user's function.
struct DummyThreadArgs {
int core_affinity;
ThreadFunc f;
ThreadArg a;
};
// Trampoline entry point: pins the new thread to its requested core, then
// runs the user's function. Frees the wrapper malloc'd by
// make_thread(f, a, core_affinity) — previously one DummyThreadArgs leaked
// per spawned thread.
DWORD _dummy_thread(LPVOID a) {
  DummyThreadArgs *wrap = static_cast<DummyThreadArgs*>(a);
  auto f = wrap->f;
  auto arg = wrap->a;
  set_affinity(wrap->core_affinity);
  free(wrap); // consume the heap wrapper before running user code
  return f(arg);
}
// Spawn a thread that first pins itself to core_affinity, then runs f(a).
// The args wrapper must outlive this call, so it is heap-allocated and the
// spawned thread is expected to release it; if CreateThread fails nobody
// else can, so free it here to avoid a leak.
Thread make_thread(ThreadFunc f, ThreadArg a, int core_affinity) {
  DWORD tid;
  DummyThreadArgs *args = (DummyThreadArgs*)malloc(sizeof(DummyThreadArgs));
  *args = {
    .core_affinity=core_affinity,
    .f=f,
    .a=a
  };
  Thread t = CreateThread(NULL, 0, _dummy_thread, args, 0, &tid);
  if (t == NULL) free(args); // thread never started; reclaim the wrapper
  return t;
}
// Block until thread t exits.
void join(Thread t) {
WaitForSingleObject(t, INFINITE);
}
// Sleep for roughly ts (converted to whole milliseconds; sub-ms truncates,
// and actual resolution is limited by the OS scheduler).
void sleep(TimeSpan ts) {
Sleep(static_cast<DWORD>(to_ms(ts)));
}
void allow_all_processors() {
Thread t = GetCurrentThread();
DWORD affinity = bitmask(num_cores);
SetProcessAffinityMask(t, affinity);
}
// Pin thread t to a single core (wrapped modulo the core count).
// BUG FIX: the mask was a DWORD built with `1 << core`, which truncates /
// overflows for cores >= 32; DWORD_PTR carries the full 64-bit mask.
void set_affinity(Thread &t, int core) {
  DWORD_PTR mask = ((DWORD_PTR)1) << (core % num_cores);
  DWORD_PTR old = SetThreadAffinityMask(t, mask);
  // Re-apply to read back the mask we just installed, confirming it stuck.
  DWORD_PTR confirm = SetThreadAffinityMask(t, mask);
  assert(old && GetLastError() != ERROR_INVALID_PARAMETER && mask == confirm);
}
// Pin the calling thread to the given core.
void set_affinity(int core) {
Thread cur = GetCurrentThread();
set_affinity(cur, core);
}
int get_affinity() {
Thread t = GetCurrentThread();
DWORD mask = 1;
DWORD affinity = SetThreadAffinityMask(t, (DWORD_PTR)mask);
DWORD check = SetThreadAffinityMask(t, (DWORD_PTR)affinity);
assert(check == mask);
return log2_64(affinity);
}
Mutex make_mutex() {
Mutex m;
InitializeCriticalSection(&m);
@@ -135,25 +236,25 @@ void dispose(Semaphore &s) {
TimeSpan from_ms(double milliseconds) {
TimeSpan ts;
ts.QuadPart = static_cast<int64_t>(milliseconds/1000.0)*freq.QuadPart;
ts.QuadPart = static_cast<LONGLONG>(milliseconds/1000.0)*freq.QuadPart;
return ts;
}
TimeSpan from_s(double seconds) {
TimeSpan ts;
ts.QuadPart = static_cast<int64_t>(seconds)*freq.QuadPart;
ts.QuadPart = static_cast<LONGLONG>(seconds)*freq.QuadPart;
return ts;
}
TimeSpan from_min(double minutes) {
TimeSpan ts;
ts.QuadPart = static_cast<int64_t>(minutes*60.0)*freq.QuadPart;
ts.QuadPart = static_cast<LONGLONG>(minutes*60.0)*freq.QuadPart;
return ts;
}
TimeSpan from_hours(double hours) {
TimeSpan ts;
ts.QuadPart = static_cast<int64_t>(hours*60.0*60.0)*freq.QuadPart;
ts.QuadPart = static_cast<LONGLONG>(hours*60.0*60.0)*freq.QuadPart;
return ts;
}
@@ -163,26 +264,44 @@ TimeSpan now() {
return ts;
}
TimeSpan operator-(const TimeSpan &a, TimeSpan &b) {
// Difference of two spans, in raw timer ticks.
TimeSpan operator-(const TimeSpan &a, const TimeSpan &b) {
TimeSpan ts;
ts.QuadPart = a.QuadPart - b.QuadPart;
return ts;
}
double to_ms(TimeSpan &sp) {
return static_cast<double>(sp.QuadPart*1000)/static_cast<double>(freq.QuadPart);
// Sum of two spans, in raw timer ticks.
TimeSpan operator+(const TimeSpan &a, const TimeSpan &b) {
TimeSpan ts;
ts.QuadPart = a.QuadPart + b.QuadPart;
return ts;
}
double to_s(TimeSpan &sp) {
return static_cast<double>(sp.QuadPart)/static_cast<double>(freq.QuadPart);
// Tick-by-tick product. NOTE(review): the result's unit is ticks squared —
// presumably only meaningful for scaling/ratio math; confirm at call sites.
TimeSpan operator*(const TimeSpan &a, const TimeSpan &b) {
TimeSpan ts;
ts.QuadPart = a.QuadPart * b.QuadPart;
return ts;
}
double to_min(TimeSpan &sp) {
return static_cast<double>(sp.QuadPart)/static_cast<double>(freq.QuadPart*60);
// Integer (truncating) division of tick counts. Division by a zero-tick
// span is undefined, as with any integer division.
TimeSpan operator/(const TimeSpan &a, const TimeSpan &b) {
TimeSpan ts;
ts.QuadPart = a.QuadPart / b.QuadPart;
return ts;
}
double to_hours(TimeSpan &sp) {
return static_cast<double>(sp.QuadPart)/static_cast<double>(freq.QuadPart*60*60);
// Convert a TimeSpan (ticks of the QueryPerformanceFrequency clock) to ms.
// NOTE: QuadPart*1000 can overflow int64 for extremely large spans.
double to_ms(TimeSpan &ts) {
return static_cast<double>(ts.QuadPart*1000)/static_cast<double>(freq.QuadPart);
}
// Convert ticks to seconds.
double to_s(TimeSpan &ts) {
return static_cast<double>(ts.QuadPart)/static_cast<double>(freq.QuadPart);
}
// Convert ticks to minutes.
double to_min(TimeSpan &ts) {
return static_cast<double>(ts.QuadPart)/static_cast<double>(freq.QuadPart*60);
}
// Convert ticks to hours.
double to_hours(TimeSpan &ts) {
return static_cast<double>(ts.QuadPart)/static_cast<double>(freq.QuadPart*60*60);
}
#endif

56
inc/util.h Normal file
View File

@@ -0,0 +1,56 @@
#pragma once
#include <cstring>
// Fully parenthesized so arguments like `a + 1` or `p ? x : y` expand
// safely. NOTE: arguments are evaluated more than once — callers must not
// pass expressions with side effects.
#define min(A, B) (((A) < (B)) ? (A) : (B))
#define max(A, B) (((A) > (B)) ? (A) : (B))
// The "better" of A and B: larger when GT (greater-is-better) is true,
// smaller otherwise.
#define better(GT, A, B) ((GT) ? max((A), (B)) : min((A), (B)))
// Fixed-length heap array of T. Ownership is manual: free data yourself.
template <class T> struct Array {
  T *data;
  int len;
  T &operator[](int i) { return data[i]; }
};
// Allocate an Array<T> with room for len elements; contents uninitialized.
template <class T> Array<T> make_array(int len) {
  return {
    .data=(T*)malloc(sizeof(T)*len),
    .len=len
  };
}
template <class T> T back(Array<T> &a) { return a.data[a.len-1]; }
template <class T> T front(Array<T> &a) { return a.data[0]; }
// Growable heap array of T (manual ownership of _data). Valid elements
// occupy [0, end); cap is the allocated capacity.
template <class T> struct DynArray {
  T* _data;
  int end;
  int cap;
  T &operator[](int i) { return _data[i]; }
};
template <class T> DynArray<T> make_dynarray(int cap) {
  return {
    ._data=(T*)malloc(sizeof(T)*cap),
    .end=0,
    .cap=cap
  };
}
// Reallocate to new_cap, copying the surviving prefix of elements.
// NOTE: memcpy assumes T is trivially copyable.
template <class T> void resize(DynArray<T> &a, int new_cap) {
  T* old = a._data;
  a._data = (T*)malloc(sizeof(T)*new_cap);
  memcpy(a._data, old, min(sizeof(T)*a.end, sizeof(T)*new_cap));
  a.end = min(a.end, new_cap); // shrinking must clamp the length too
  a.cap = new_cap;
  free(old);
}
template <class T> void append(DynArray<T> &a, T el) {
  // BUG FIX: was resize(a, min(1, a.cap*2)), which shrank any full array
  // back to capacity 1 and then wrote past the buffer. Growth needs max.
  if (a.end == a.cap) resize(a, max(1, a.cap*2));
  a[a.end++] = el;
}
template <class T> T back(DynArray<T> &a) { return a._data[a.end-1]; }
template <class T> T front(DynArray<T> &a) { return a._data[0]; }

View File

@@ -3,6 +3,7 @@
#include <cstdlib>
#include "genetic.h"
#include "rand.h"
#include "sync.h"
using namespace genetic;
@@ -40,9 +41,6 @@ void crossover(const Array<Array<float>*> parents, const Array<Array<float> *> o
}
}
// norm_rand can go negative. fix in genetic.cpp
// child stride doesn't make sense. Should always skip over child num
float fitness(const Array<float> &cell) {
float sum = 0;
float product = 1;
@@ -54,12 +52,14 @@ float fitness(const Array<float> &cell) {
}
int main(int argc, char **argv) {
int num_gens = 2000;
int num_gens = 10000;
Strategy<Array<float>> strat {
.num_threads = 15,
.batch_size = 1000,
.num_cells = 100000,
.num_threads = atoi(argv[1]),
.stats_print_period_s = 2,
.num_cells_per_thread = 100000,
.num_generations = num_gens,
.share_breakthroughs=true,
.share_breakthrough_gen_period=10,
.test_all = true,
.test_chance = 0.0, // doesn't matter
.enable_crossover = true,
@@ -76,13 +76,15 @@ int main(int argc, char **argv) {
.fitness=fitness
};
auto res = run(strat);
TimeSpan start = now();
auto best_cell = run(strat);
TimeSpan runtime = now() - start;
float sum = 0;
float product = 1;
printf("Winning cell: ");
for (int i = 0; i < res.best_cell.back().len; i++) {
float val = res.best_cell.back()[i];
for (int i = 0; i < best_cell.len; i++) {
float val = best_cell[i];
sum += val;
product *= val;
printf("%f ", val);
@@ -90,7 +92,5 @@ int main(int argc, char **argv) {
printf("\n");
printf("Final Sum: %f\n", sum);
printf("Final Product: %f\n", product);
printf("Setup Time (s): %f\n", sync::to_s(res.setup_time));
printf("Run Time (s): %f\n", sync::to_s(res.run_time));
printf("Average Gen Time (s): %f\n", sync::to_s(res.run_time)/num_gens);
printf("Execution Time %d (min) %f (s)\n", static_cast<int>(sync::to_min(runtime)), fmod(to_s(runtime), 60) );
}