mirror of https://github.com/Jaxan/hybrid-ads.git
synced 2025-04-27 23:17:44 +02:00

Rewrites main to log runtimes, output checking seqs (compressed)

parent 30d1d0e5c5
commit 4a2bc674cc

9 changed files with 414 additions and 47 deletions
.gitignore (vendored, 3 changed lines)

@@ -1,4 +1,5 @@
*.dot
*.png
*dist_seq
*.pdf
*_seq
*splitting_tree
CMakeLists.txt

@@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 2.8)

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++1y")

find_package(Boost REQUIRED COMPONENTS program_options filesystem system serialization)
find_package(Boost REQUIRED COMPONENTS iostreams program_options filesystem system serialization)
include_directories(SYSTEM ${Boost_INCLUDE_DIRS})
set(libs ${libs} ${Boost_LIBRARIES})
@@ -15,7 +15,7 @@ std::vector<T> concat(std::vector<T> const & l, std::vector<T> const & r){
return ret;
}

result create_splitting_tree(const Mealy& g){
result create_splitting_tree(const Mealy& g, options opt){
const auto N = g.graph.size();
const auto P = g.input_indices.size();
const auto Q = g.output_indices.size();

@@ -47,7 +47,9 @@ result create_splitting_tree(const Mealy& g){

assert(boom.states.size() == accumulate(begin(boom.children), end(boom.children), 0, [](auto l, auto r) { return l + r.states.size(); }));
};
const auto is_valid = [N, &g](auto blocks, auto symbol){
const auto is_valid = [N, opt, &g](auto blocks, auto symbol){
if(!opt.check_validity) return true;

for(auto && block : blocks) {
const auto new_blocks = partition_(begin(block), end(block), [symbol, &g](state state){
return apply(g, state, symbol).to.base();
@@ -23,4 +23,11 @@ struct result {
bool is_complete;
};

result create_splitting_tree(Mealy const & m);
struct options {
bool check_validity = true;
};

constexpr options with_validity_check{true};
constexpr options without_validity_check{false};

result create_splitting_tree(Mealy const & m, options opt);
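A minimal sketch of how the new overload and the two predefined option values are meant to be called, mirroring src/main.cpp further down; "machine.dot" is a placeholder file name and this snippet is not part of the commit:

#include <create_splitting_tree.hpp>
#include <read_mealy_from_dot.hpp>

int main(){
    const auto machine = read_mealy_from_dot("machine.dot"); // hypothetical input file

    // Without the validity check: the plain Hopcroft-style refinement.
    const auto hopcroft_tree = create_splitting_tree(machine, without_validity_check);

    // With the validity check: the Lee & Yannakakis splitting tree.
    const auto ly_tree = create_splitting_tree(machine, with_validity_check);

    return (hopcroft_tree.is_complete && ly_tree.is_complete) ? 0 : 1;
}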
@@ -1,10 +1,36 @@
#pragma once

#include <mutex>
#include <chrono>
#include <iostream>

// Works particularly nice with lambda's, as they give naturally unique types :)
template <typename F>
void fire_once(F && f){
static std::once_flag flag;
std::call_once(flag, f);
}
struct timer{
using clock = std::chrono::high_resolution_clock;
using time = std::chrono::time_point<clock>;
using seconds = std::chrono::duration<double>;

std::string name;
time begin;
bool active = true;

timer(std::string name)
: name(name)
, begin(clock::now())
{
std::cerr << name << std::endl;
}

void stop(){
if(!active) return;
time end = clock::now();
std::cerr << "* " << from_duration(end - begin) << '\t' << name << std::endl;
active = false;
}

~timer(){
stop();
}

static double from_duration(seconds s){
return s.count();
}
};
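A minimal usage sketch for the two helpers above (not part of this diff; it assumes the header is available as logging.hpp on the include path, as src/main.cpp includes it):

#include <logging.hpp>
#include <iostream>

int main(){
    // fire_once: the static once_flag lives per template instantiation, and every
    // lambda has a distinct type, so each call site fires at most once per run.
    for(int i = 0; i < 3; ++i){
        fire_once([]{ std::cerr << "printed only the first time around\n"; });
    }

    // timer: prints its name on construction and the elapsed time in seconds on
    // destruction, or earlier via stop().
    timer t("whole run");
    // ... timed work goes here ...
    t.stop();
}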
scripts.md (new file, 46 lines)

@@ -0,0 +1,46 @@
all
===


for f in *.dot; do ../build/main $f; done
for f in *splitting_tree; do sed -i "" -e 's/label="........................................*"/label="truncated"/g' $f; done
for f in *dist_seq; do sed -i "" -e 's/label="........................................*"/label="truncated"/g' $f; done
dot -O -Tpng -Goverlap=false *dist_seq
dot -O -Tpng -Goverlap=false *splitting_tree


graphs
======

dot -O -Tpng -Goverlap=false *dist_seq
dot -O -Tpng -Goverlap=false *splitting_tree

neato -O -Tpng -Goverlap=false *.dot


truncation
==========

sed 's/label="........................................*"/label="truncated"/g' esm-manual-controller.dot.splitting_tree.dot > esm-manual-controller.dot.splitting_tree_truncated.dot

for f in *splitting_tree; do echo $f; sed -i "" -e 's/label="........................................*"/label="truncated"/g' $f; done
for f in *dist_seq; do echo $f; sed -i "" -e 's/label="........................................*"/label="truncated"/g' $f; done


cleaning
========

sed 's/\[label.*<td>I/ \[label="/g;s/<\/td>.*<td>O/ \/ /g;s/<\/td>.*]/"\];/g'


not needed ctor
===============

partition_refine(Blocks other_blocks){
auto beg = elements.begin();
for(auto && block : other_blocks){
std::copy(block.begin(), block.end(), std::back_inserter(elements));
blocks.insert(blocks.end(), {beg, std::prev(elements.end())});
beg = elements.end();
}
}
src/conf.cpp (new file, 50 lines)

@@ -0,0 +1,50 @@
#include <mealy.hpp>
#include <read_mealy_from_dot.hpp>

#include <boost/iostreams/device/file_descriptor.hpp>
#include <boost/iostreams/filtering_stream.hpp>
#include <boost/iostreams/filter/gzip.hpp>

#include <fstream>
#include <iostream>
#include <string>

using namespace std;

int main(int argc, char *argv[]){
if(argc != 3) return 37;

const string m_filename = argv[1];
const string c_filename = argv[2];

ifstream m_file(m_filename);
boost::iostreams::filtering_istream c_file;
c_file.push(boost::iostreams::gzip_decompressor());
c_file.push(boost::iostreams::file_descriptor_source(c_filename));

const auto machine = read_mealy_from_dot(m_file);

string in;
string out;

state s = 0;
size_t count = 0;
while(c_file >> in >> out){
const auto i = machine.input_indices.at(in);
const auto o = machine.output_indices.at(out);

const auto ret = apply(machine, s, i);
if(ret.output != o){
cout << "conformance fail" << endl;
cout << ret.output << " != " << o << endl;
cout << "at index " << count << endl;
return 1;
}

s = ret.to;
count++;
}

cout << "conformance succes " << count << endl;
}
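src/conf.cpp reads back the gzip-compressed checking sequences that the rewritten main (below) writes. A minimal, self-contained round-trip sketch of the same Boost.Iostreams pipeline, not part of the commit; the file name is made up, and it needs -lboost_iostreams to link:

#include <boost/iostreams/device/file_descriptor.hpp>
#include <boost/iostreams/filtering_stream.hpp>
#include <boost/iostreams/filter/gzip.hpp>

#include <iostream>
#include <string>

int main(){
    const std::string filename = "example.check_seq"; // hypothetical output file

    {   // Writing: text pushed into the stream reaches the file gzip-compressed.
        boost::iostreams::filtering_ostream out;
        out.push(boost::iostreams::gzip_compressor());
        out.push(boost::iostreams::file_descriptor_sink(filename));
        out << "a 0\n" << "b 1\n";
    }   // The filter chain is flushed and the file closed when out goes out of scope.

    // Reading it back: the same pipeline in reverse, as src/conf.cpp does.
    boost::iostreams::filtering_istream in;
    in.push(boost::iostreams::gzip_decompressor());
    in.push(boost::iostreams::file_descriptor_source(filename));

    std::string i, o;
    while(in >> i >> o) std::cout << i << ' ' << o << '\n';
}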
src/main.cpp (283 changed lines)

@@ -2,6 +2,11 @@
#include <create_splitting_tree.hpp>
#include <read_mealy_from_dot.hpp>
#include <write_tree_to_dot.hpp>
#include <logging.hpp>

#include <boost/iostreams/device/file_descriptor.hpp>
#include <boost/iostreams/filtering_stream.hpp>
#include <boost/iostreams/filter/gzip.hpp>

#include <cassert>
#include <iostream>

@@ -10,59 +15,267 @@
#include <functional>
#include <vector>
#include <utility>
#include <fstream>

using namespace std;

template <typename T>
vector<string> create_reverse_map(map<string, T> const & indices){
vector<string> ret(indices.size());
for(auto&& p : indices){
ret[p.second.base()] = p.first;
}
return ret;
}

auto bfs(Mealy const & machine, state s){
vector<bool> visited(machine.graph_size, false);
vector<vector<input>> words(machine.graph_size);

queue<state> work;
work.push(s);
while(!work.empty()){
const auto u = work.front();
work.pop();

if(visited[u.base()]) continue;

visited[u.base()] = true;

for(input i = 0; i < machine.input_size; ++i){
const auto v = apply(machine, u, i).to;
if(visited[v.base()]) continue;

words[v.base()] = words[u.base()];
words[v.base()].push_back(i);
work.push(v);
}
}

return words;
}

int main(int argc, char *argv[]){
if(argc != 2) return 1;
const string filename = argv[1];

cerr << "* Reading file " << filename << "\n";
const auto machine = read_mealy_from_dot(filename);
assert(is_complete(machine));
cerr << "\tdone\n";
const auto machine = [&]{
timer t("reading file " + filename);
return read_mealy_from_dot(filename);
}();

cerr << "* Starting Lee & Yannakakis I\n";
const auto splitting_tree = create_splitting_tree(machine);
cerr << "\tdone\n";
const auto splitting_tree_hopcroft = [&]{
timer t("creating hopcroft splitting tree");
return create_splitting_tree(machine, without_validity_check);
}();

cerr << "* Write splitting tree\n";
const string tree_filename = splitting_tree.is_complete ? (filename + ".splitting_tree") : (filename + ".incomplete_splitting_tree");
write_splitting_tree_to_dot(splitting_tree.root, tree_filename);
cerr << "\tdone\n";
const auto all_pair_seperating_sequences = [&]{
timer t("gathering all seperating sequences");

cerr << "* Lee and Yannakaki II\n";
const auto distinguishing_sequence = create_adaptive_distinguishing_sequence(splitting_tree);
cerr << "\tdone\n";
vector<vector<vector<input>>> all_pair_seperating_sequences(machine.graph_size, vector<vector<input>>(machine.graph_size));

cerr << "* Write dist sequence\n";
const string dseq_filename = splitting_tree.is_complete ? (filename + ".dist_seq") : (filename + ".incomplete_dist_seq");
write_adaptive_distinguishing_sequence_to_dot(distinguishing_sequence.sequence, dseq_filename);
cerr << "\tdone\n" << endl;
queue<reference_wrapper<const splijtboom>> work;
work.push(splitting_tree_hopcroft.root);

vector<vector<input>> uios(splitting_tree.root.states.size());
stack<pair<vector<input>, reference_wrapper<const dist_seq>>> work;
work.push({{}, distinguishing_sequence.sequence});
// total complexity is O(n^2), as we're visiting each pair only once :)
while(!work.empty()){
const splijtboom & node = work.front();
work.pop();

while(!work.empty()){
auto word = work.top().first;
const dist_seq & node = work.top().second;
work.pop();
auto it = begin(node.children);
auto ed = end(node.children);

if(node.CI.size() == 1){
const auto state = node.CI[0].second;
uios[state.base()] = word;
continue;
while(it != ed){
auto jt = next(it);
while(jt != ed){
for(auto && s : it->states){
for(auto && t : jt->states){
assert(all_pair_seperating_sequences[t.base()][s.base()].empty());
assert(all_pair_seperating_sequences[s.base()][t.base()].empty());
all_pair_seperating_sequences[t.base()][s.base()] = node.seperator;
all_pair_seperating_sequences[s.base()][t.base()] = node.seperator;
}
}
jt++;
}
it++;
}

for(auto && c : node.children){
work.push(c);
}
}

for(auto && i : node.word)
word.push_back(i);
for(size_t i = 0; i < machine.graph_size; ++i){
for(size_t j = 0; j < machine.graph_size; ++j){
if(i == j) continue;
assert(!all_pair_seperating_sequences[i][j].empty());
}
}

for(auto && c : node.children)
work.push({word, c});
return all_pair_seperating_sequences;
}();

const auto splitting_tree = [&]{
timer t("Lee & Yannakakis I");
return create_splitting_tree(machine, with_validity_check);
}();

if(false){
timer t("writing splitting tree");
const string tree_filename = splitting_tree.is_complete ? (filename + ".splitting_tree") : (filename + ".incomplete_splitting_tree");
write_splitting_tree_to_dot(splitting_tree.root, tree_filename);
}

size_t uio_count = count_if(begin(uios), end(uios), [](auto && u){ return !u.empty(); });
cout << uio_count << " / " << uios.size() << endl;
const auto distinguishing_sequence = [&]{
timer t("Lee & Yannakakis II");
return create_adaptive_distinguishing_sequence(splitting_tree);
}();

if(false){
timer t("writing dist sequence");
const string dseq_filename = splitting_tree.is_complete ? (filename + ".dist_seq") : (filename + ".incomplete_dist_seq");
write_adaptive_distinguishing_sequence_to_dot(distinguishing_sequence.sequence, dseq_filename);
}

const auto seperating_family = [&]{
timer t("making seperating family");
using Word = vector<input>;
using SepSet = vector<Word>;
vector<SepSet> seperating_family(machine.graph_size);

stack<pair<vector<input>, reference_wrapper<const dist_seq>>> work;
work.push({{}, distinguishing_sequence.sequence});

while(!work.empty()){
auto word = work.top().first;
const dist_seq & node = work.top().second;
work.pop();

if(node.children.empty()){
// add sequence to this leave
for(auto && p : node.CI){
const auto state = p.second;
seperating_family[state.base()].push_back(word);
}

// if the leaf is not a singleton, we need the all_pair seperating seqs
for(auto && p : node.CI){
for(auto && q : node.CI){
const auto s = p.second;
const auto t = q.second;
if(s == t) continue;
seperating_family[s.base()].push_back(all_pair_seperating_sequences[s.base()][t.base()]);
}
}

continue;
}

for(auto && i : node.word)
word.push_back(i);

for(auto && c : node.children)
work.push({word, c});
}

return seperating_family;
}();

const auto inputs = create_reverse_map(machine.input_indices);
const auto outputs = create_reverse_map(machine.output_indices);
const auto print_uio = [&](auto const & word, auto & out, state s) -> auto & {
for(auto && i : word){
const auto o = apply(machine, s, i);
s = o.to;

out << inputs[i.base()] << ' ' << outputs[o.output.base()] << '\n';
}
return out;
};

const auto transfer_sequences = [&]{
timer t("determining transfer sequences");
vector<vector<vector<input>>> transfer_sequences(machine.graph_size);
for(state s = 0; s < machine.graph_size; ++s){
transfer_sequences[s.base()] = bfs(machine, s);
}
return transfer_sequences;
}();

const auto short_checking_seq = [&]{
timer t("making short checking seq");
vector<input> big_seq;
state from = 0;
for(state s = from; s < machine.graph_size; ++s){
for(const auto & seq : seperating_family[s.base()]){
copy(begin(seq), end(seq), back_inserter(big_seq));
from = apply(machine, s, begin(seq), end(seq)).to;

const auto to = s;
if(from == to) continue;

const auto transfer = transfer_sequences[from.base()][to.base()];
copy(begin(transfer), end(transfer), back_inserter(big_seq));
}

const auto to = s+1;
if(from == to) continue;

const auto transfer = transfer_sequences[from.base()][to.base()];
copy(begin(transfer), end(transfer), back_inserter(big_seq));
}

return big_seq;
}();

{
timer t("writing short checking seq");
const string uios_filename = filename + ".short_check_seq";

boost::iostreams::filtering_ostream out;
out.push(boost::iostreams::gzip_compressor());
out.push(boost::iostreams::file_descriptor_sink(uios_filename));

print_uio(short_checking_seq, out, 0);
}

const auto long_checking_seq = [&]{
timer t("making long checking seq");
vector<input> big_seq;
state from = 0;
for(state s = from; s < machine.graph_size; ++s){
for(input i = 0; i < machine.input_size; ++i){
const auto t = apply(machine, s, i).to;

for(auto && seq : seperating_family[t.base()]){
if(from != s){
const auto transfer = transfer_sequences[from.base()][s.base()];
copy(begin(transfer), end(transfer), back_inserter(big_seq));
from = s;
}

big_seq.push_back(i);
from = t;

copy(begin(seq), end(seq), back_inserter(big_seq));
from = apply(machine, from, begin(seq), end(seq)).to;
}
}
}

return big_seq;
}();

{
timer t("writing long checking seq");
const string uios_filename = filename + ".full_check_seq";

boost::iostreams::filtering_ostream out;
out.push(boost::iostreams::gzip_compressor());
out.push(boost::iostreams::file_descriptor_sink(uios_filename));

print_uio(long_checking_seq, out, 0);
}
}
src/stats.cpp (new file, 22 lines)

@@ -0,0 +1,22 @@
#include <read_mealy_from_dot.hpp>
#include <mealy.hpp>

#include <string>
#include <fstream>
#include <iostream>

using namespace std;

int main(int argc, char *argv[]){
if(argc != 2) return 37;

const auto filename = argv[1];
ifstream file(filename);
const auto machine = read_mealy_from_dot(file);

cout << "machine " << filename << " has\n";
cout << '\t' << machine.graph_size << " states\n";
cout << '\t' << machine.input_size << " inputs\n";
cout << '\t' << machine.output_size << " outputs\n";
}