Mirror of https://github.com/Jaxan/hybrid-ads.git (synced 2025-04-27 15:07:45 +02:00)

Commit 1c9f56a6ec (parent a7a7f815da)

Refactors a lot. Also implements a basic test suite.

23 changed files with 511 additions and 403 deletions
@@ -1,5 +1,4 @@
-#include "create_adaptive_distinguishing_sequence.hpp"
-#include "create_splitting_tree.hpp"
+#include "adaptive_distinguishing_sequence.hpp"
 
 #include <algorithm>
 #include <cassert>
@@ -8,18 +7,26 @@
 
 using namespace std;
 
-distinguishing_sequence create_adaptive_distinguishing_sequence(const result & splitting_tree){
+adaptive_distinguishing_sequence::adaptive_distinguishing_sequence(size_t N, size_t depth)
+	: CI(N)
+	, depth(depth)
+{
+	for(size_t i = 0; i < N; ++i)
+		CI[i] = {i, i};
+}
+
+adaptive_distinguishing_sequence create_adaptive_distinguishing_sequence(const result & splitting_tree){
 	const auto & root = splitting_tree.root;
 	const auto & succession = splitting_tree.successor_cache;
 	const auto N = root.states.size();
 
-	distinguishing_sequence sequence(N, 0);
+	adaptive_distinguishing_sequence sequence(N, 0);
 
-	queue<reference_wrapper<distinguishing_sequence>> work;
+	queue<reference_wrapper<adaptive_distinguishing_sequence>> work;
 	work.push(sequence);
 
 	while(!work.empty()){
-		distinguishing_sequence & node = work.front();
+		adaptive_distinguishing_sequence & node = work.front();
 		work.pop();
 
 		if(node.CI.size() < 2) continue;
 
@@ -37,7 +44,7 @@ distinguishing_sequence create_adaptive_distinguishing_sequence(const result & splitting_tree){
 
 			node.word = oboom.seperator;
 			for(auto && c : oboom.children){
-				distinguishing_sequence new_c(0, node.depth + 1);
+				adaptive_distinguishing_sequence new_c(0, node.depth + 1);
 
 				size_t i = 0;
 				size_t j = 0;
lib/adaptive_distinguishing_sequence.hpp (new file, 25 lines)

@@ -0,0 +1,25 @@
+#pragma once
+
+#include "types.hpp"
+#include "splitting_tree.hpp"
+
+#include <utility>
+
+/*
+ * The adaptive distinguishing sequence as described in Lee & Yannakakis. This
+ * is not a sequence, but a decision tree! It can be constructed from the Lee
+ * & Yannakakis-style splitting tree. We also need some other data produced
+ * by the splitting tree algorithm.
+ */
+
+struct adaptive_distinguishing_sequence {
+	adaptive_distinguishing_sequence(size_t N, size_t depth);
+
+	// current, initial
+	std::vector<std::pair<state, state>> CI;
+	std::vector<adaptive_distinguishing_sequence> children;
+	word word;
+	size_t depth;
+};
+
+adaptive_distinguishing_sequence create_adaptive_distinguishing_sequence(result const & splitting_tree);
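Editor's note (not part of the diff): a minimal usage sketch of the new header. The helper name build_ads is hypothetical; the called functions and option constant are the ones declared in splitting_tree.hpp and in this header.

// Sketch only, assuming a complete mealy machine as input.
#include "adaptive_distinguishing_sequence.hpp"
#include "splitting_tree.hpp"

adaptive_distinguishing_sequence build_ads(mealy const & machine){
	// The Lee & Yannakakis options keep the successor cache and validity
	// check that create_adaptive_distinguishing_sequence relies on.
	const result tree = create_splitting_tree(machine, lee_yannakakis_style);
	return create_adaptive_distinguishing_sequence(tree);
}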
@@ -1,32 +0,0 @@
-#pragma once
-
-#include "mealy.hpp"
-
-#include <vector>
-#include <utility>
-
-struct result;
-struct distinguishing_sequence;
-
-
-// Creates a distinguishing sequence based on the output of the first algorithm
-distinguishing_sequence create_adaptive_distinguishing_sequence(result const & splitting_tree);
-
-
-// The adaptive distinguishing sequence as described in Lee & Yannakakis
-// This is really a tree!
-struct distinguishing_sequence {
-	distinguishing_sequence(size_t N, size_t depth)
-	: CI(N)
-	, depth(depth)
-	{
-		for(size_t i = 0; i < N; ++i)
-			CI[i] = {i, i};
-	}
-
-	// current, initial
-	std::vector<std::pair<state, state>> CI;
-	std::vector<input> word;
-	std::vector<distinguishing_sequence> children;
-	size_t depth;
-};
@@ -1,44 +0,0 @@
-#pragma once
-
-#include "mealy.hpp"
-#include "splitting_tree.hpp"
-
-#include <vector>
-
-struct options;
-struct result;
-
-
-// Creates a Lee & Yannakakis style splitting tree
-// Depending on the options it can also create the classical Hopcroft splitting tree
-result create_splitting_tree(Mealy const & m, options opt);
-
-
-// The algorithm can be altered in some ways. This struct provides options
-// to the algorithm
-struct options {
-	bool check_validity = true;
-};
-
-constexpr options with_validity_check{true};
-constexpr options without_validity_check{false};
-
-
-// The algorithm constructs more than the splitting tree
-// We capture the other information as well
-struct result {
-	result(size_t N)
-	: root(N, 0)
-	, successor_cache()
-	, is_complete(true)
-	{}
-
-	// The splitting tree as described in Lee & Yannakakis
-	splijtboom root;
-
-	// Encodes f_u : depth -> state -> state, where only the depth of u is of importance
-	std::vector<std::vector<state>> successor_cache;
-
-	// false <-> no adaptive distinguishing sequence
-	bool is_complete;
-};
lib/io.hpp (new file, 31 lines)

@@ -0,0 +1,31 @@
+#pragma once
+
+#include "phantom.hpp"
+#include "mealy.hpp"
+
+#include <boost/iostreams/device/file_descriptor.hpp>
+#include <boost/iostreams/filter/gzip.hpp>
+#include <boost/iostreams/filtering_stream.hpp>
+
+#include <boost/serialization/serialization.hpp>
+#include <boost/serialization/string.hpp>
+#include <boost/serialization/vector.hpp>
+
+#include <boost/archive/text_oarchive.hpp>
+#include <boost/archive/text_iarchive.hpp>
+#include <boost/archive/binary_oarchive.hpp>
+#include <boost/archive/binary_iarchive.hpp>
+
+#include <iostream>
+#include <fstream>
+
+namespace boost {
+namespace serialization {
+
+template<class Archive, typename B, typename T>
+void serialize(Archive & ar, phantom<B, T> & value, const unsigned int /*version*/){
+	ar & value.x;
+}
+
+} // namespace serialization
+} // namespace boost
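Editor's note (not part of the diff): io.hpp only pulls in the Boost headers and teaches Boost.Serialization about phantom<B, T>. A hedged sketch of the intended use, modelled on how src/main.cpp below writes the test suite; write_suite and its parameters are hypothetical names.

// Sketch only: gzipped text archive, as used by main.cpp in this commit.
#include "io.hpp"

#include <string>
#include <vector>

void write_suite(std::vector<std::vector<std::string>> const & suite, std::string const & filename){
	boost::iostreams::filtering_ostream out;
	out.push(boost::iostreams::gzip_compressor());
	out.push(boost::iostreams::file_descriptor_sink(filename));

	boost::archive::text_oarchive archive(out);
	archive << suite; // vectors and strings serialize via the headers included above
}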
@@ -1,18 +1,11 @@
 #pragma once
 
-#include "phantom.hpp"
+#include "types.hpp"
 
 #include <map>
 #include <string>
 #include <vector>
 
-/* We use size_t's for easy indexing. But we do not want to mix states and
- * inputs. We use phantom typing to "generate" distinguished types :).
- */
-using state = phantom<size_t, struct state_tag>;
-using input = phantom<size_t, struct input_tag>;
-using output = phantom<size_t, struct output_tag>;
-
 /*
  * Structure used for reading mealy files from dot files.
  * Everything is indexed by size_t's, so that we can index vectors
@@ -20,7 +13,7 @@ using output = phantom<size_t, struct output_tag>;
  * to these size_t's. Can only represent deterministic machines,
  * but partiality still can occur.
  */
-struct Mealy {
+struct mealy {
 	struct edge {
 		state to = -1;
 		output output = -1;
@@ -39,7 +32,7 @@ struct Mealy {
 	size_t output_size = 0;
 };
 
-inline auto is_complete(const Mealy & m){
+inline auto is_complete(const mealy & m){
 	for(state n = 0; n < m.graph_size; ++n){
 		if(m.graph[n.base()].size() != m.input_size) return false;
 		for(auto && e : m.graph[n.base()]) if(e.to == -1 || e.output == -1) return false;
@@ -47,13 +40,13 @@ inline auto is_complete(const Mealy & m){
 	return true;
 }
 
-inline auto apply(Mealy const & m, state state, input input){
+inline auto apply(mealy const & m, state state, input input){
 	return m.graph[state.base()][input.base()];
 }
 
 template <typename Iterator>
-auto apply(Mealy const & m, state state, Iterator b, Iterator e){
-	Mealy::edge ret;
+auto apply(mealy const & m, state state, Iterator b, Iterator e){
+	mealy::edge ret;
 	while(b != e){
 		ret = apply(m, state, *b++);
 		state = ret.to;
@@ -1,6 +1,7 @@
 #pragma once
 
 #include <boost/operators.hpp>
 
 #include <iosfwd>
 
 template <typename Base, typename T>
@@ -14,8 +14,8 @@ T get(istream& in){
 	return t;
 }
 
-Mealy read_mealy_from_dot(istream& in){
-	Mealy m;
+mealy read_mealy_from_dot(istream& in){
+	mealy m;
 
 	string line;
 	stringstream ss;
@@ -57,7 +57,7 @@ Mealy read_mealy_from_dot(istream& in){
 	return m;
 }
 
-Mealy read_mealy_from_dot(const string& filename){
+mealy read_mealy_from_dot(const string& filename){
 	ifstream file(filename);
 	return read_mealy_from_dot(file);
 }
@@ -2,6 +2,6 @@
 
 #include <iosfwd>
 
-struct Mealy;
-Mealy read_mealy_from_dot(const std::string & filename);
-Mealy read_mealy_from_dot(std::istream & input);
+struct mealy;
+mealy read_mealy_from_dot(const std::string & filename);
+mealy read_mealy_from_dot(std::istream & input);
lib/seperating_family.cpp (new file, 48 lines)

@@ -0,0 +1,48 @@
+#include "seperating_family.hpp"
+
+#include <functional>
+#include <stack>
+#include <utility>
+
+using namespace std;
+
+seperating_family create_seperating_family(const adaptive_distinguishing_sequence & sequence, const seperating_matrix & all_pair_seperating_sequences){
+	seperating_family seperating_family(all_pair_seperating_sequences.size());
+
+	stack<pair<word, reference_wrapper<const adaptive_distinguishing_sequence>>> work;
+	work.push({{}, sequence});
+
+	while(!work.empty()){
+		auto word = work.top().first;
+		const adaptive_distinguishing_sequence & node = work.top().second;
+		work.pop();
+
+		if(node.children.empty()){
+			// add sequence to this leave
+			for(auto && p : node.CI){
+				const auto state = p.second;
+				seperating_family[state.base()].push_back(word);
+			}
+
+			// if the leaf is not a singleton, we need the all_pair seperating seqs
+			for(auto && p : node.CI){
+				for(auto && q : node.CI){
+					const auto s = p.second;
+					const auto t = q.second;
+					if(s == t) continue;
+					seperating_family[s.base()].push_back(all_pair_seperating_sequences[s.base()][t.base()]);
+				}
+			}
+
+			continue;
+		}
+
+		for(auto && i : node.word)
+			word.push_back(i);
+
+		for(auto && c : node.children)
+			work.push({word, c});
+	}
+
+	return seperating_family;
+}
lib/seperating_family.hpp (new file, 17 lines)

@@ -0,0 +1,17 @@
+#pragma once
+
+#include "adaptive_distinguishing_sequence.hpp"
+#include "seperating_matrix.hpp"
+#include "types.hpp"
+
+/*
+ * Given an (incomplete) adaptive distinguishing sequence and all pair
+ * seperating sequences, we can construct a seperating family (as defined
+ * in Lee & Yannakakis). If the adaptive distinguishing sequence is complete,
+ * then the all pair seperating sequences are not needed.
+ */
+
+using seperating_set = std::vector<word>;
+using seperating_family = std::vector<seperating_set>;
+
+seperating_family create_seperating_family(adaptive_distinguishing_sequence const & sequence, seperating_matrix const & all_pair_seperating_sequences);
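Editor's note (not part of the diff): the header comment above describes the intended pipeline. A compact sketch, drawn from how src/main.cpp below wires the pieces together; build_family is a hypothetical helper name.

// Sketch only.
#include "seperating_family.hpp"

seperating_family build_family(mealy const & machine){
	// All-pair separating sequences come from a Hopcroft-style splitting tree.
	const auto hopcroft = create_splitting_tree(machine, hopcroft_style);
	const auto matrix = create_all_pair_seperating_sequences(hopcroft.root);

	// The adaptive distinguishing sequence comes from the Lee & Yannakakis tree.
	const auto ly = create_splitting_tree(machine, lee_yannakakis_style);
	const auto ads = create_adaptive_distinguishing_sequence(ly);

	return create_seperating_family(ads, matrix);
}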
lib/seperating_matrix.cpp (new file, 53 lines)

@@ -0,0 +1,53 @@
+#include "seperating_matrix.hpp"
+
+#include <cassert>
+#include <functional>
+#include <queue>
+
+using namespace std;
+
+seperating_matrix create_all_pair_seperating_sequences(const splitting_tree & root){
+	const auto N = root.states.size();
+	seperating_matrix all_pair_seperating_sequences(N, seperating_row(N));
+
+	queue<reference_wrapper<const splitting_tree>> work;
+	work.push(root);
+
+	// total complexity is O(n^2), as we're visiting each pair only once :)
+	while(!work.empty()){
+		const splitting_tree & node = work.front();
+		work.pop();
+
+		auto it = begin(node.children);
+		auto ed = end(node.children);
+
+		while(it != ed){
+			auto jt = next(it);
+			while(jt != ed){
+				for(auto && s : it->states){
+					for(auto && t : jt->states){
+						assert(all_pair_seperating_sequences[t.base()][s.base()].empty());
+						assert(all_pair_seperating_sequences[s.base()][t.base()].empty());
+						all_pair_seperating_sequences[t.base()][s.base()] = node.seperator;
+						all_pair_seperating_sequences[s.base()][t.base()] = node.seperator;
+					}
+				}
+				jt++;
+			}
+			it++;
+		}
+
+		for(auto && c : node.children){
+			work.push(c);
+		}
+	}
+
+	for(size_t i = 0; i < N; ++i){
+		for(size_t j = 0; j < N; ++j){
+			if(i == j) continue;
+			assert(!all_pair_seperating_sequences[i][j].empty());
+		}
+	}
+
+	return all_pair_seperating_sequences;
+}
lib/seperating_matrix.hpp (new file, 15 lines)

@@ -0,0 +1,15 @@
+#pragma once
+
+#include "types.hpp"
+#include "splitting_tree.hpp"
+
+/*
+ * A seperating matrix is a matrix indexed by states, which assigns to each
+ * pair of (inequivalent) states a seperating sequences. This can be done by
+ * the classical Hopcroft algorithm
+ */
+
+using seperating_row = std::vector<word>;
+using seperating_matrix = std::vector<seperating_row>;
+
+seperating_matrix create_all_pair_seperating_sequences(splitting_tree const & root);
@@ -1,6 +1,7 @@
-#include "create_splitting_tree.hpp"
+#include "splitting_tree.hpp"
+#include "partition.hpp"
 
 #include <cassert>
 #include <functional>
 #include <numeric>
 #include <queue>
@@ -8,6 +9,21 @@
 
 using namespace std;
 
+splitting_tree::splitting_tree(size_t N, size_t depth)
+	: states(N)
+	, depth(depth)
+{
+	iota(begin(states), end(states), 0);
+}
+
+splitting_tree &lca_impl2(splitting_tree & node){
+	if(node.mark > 1) return node;
+	for(auto && c : node.children){
+		if(c.mark > 0) return lca_impl2(c);
+	}
+	return node; // this is a leaf
+}
+
 template <typename T>
 std::vector<T> concat(std::vector<T> const & l, std::vector<T> const & r){
 	std::vector<T> ret(l.size() + r.size());
@@ -16,7 +32,7 @@ std::vector<T> concat(std::vector<T> const & l, std::vector<T> const & r){
 	return ret;
 }
 
-result create_splitting_tree(const Mealy& g, options opt){
+result create_splitting_tree(const mealy& g, options opt){
 	const auto N = g.graph.size();
 	const auto P = g.input_indices.size();
 	const auto Q = g.output_indices.size();
@@ -30,12 +46,12 @@ result create_splitting_tree(const Mealy& g, options opt){
 	 * tree. We keep track of how many times we did no work. If this is too
 	 * much, there is no complete splitting tree.
 	 */
-	queue<reference_wrapper<splijtboom>> work;
+	queue<reference_wrapper<splitting_tree>> work;
 	size_t days_without_progress = 0;
 
 	// Some lambda functions capturing some state, makes the code a bit easier :)
 	const auto add_push_new_block = [&work](auto new_blocks, auto & boom) {
-		boom.children.assign(new_blocks.size(), splijtboom(0, boom.depth + 1));
+		boom.children.assign(new_blocks.size(), splitting_tree(0, boom.depth + 1));
 
 		auto i = 0;
 		for(auto && b : new_blocks){
@@ -69,7 +85,7 @@ result create_splitting_tree(const Mealy& g, options opt){
 	// We'll start with the root, obviously
 	work.push(root);
 	while(!work.empty()){
-		splijtboom & boom = work.front();
+		splitting_tree & boom = work.front();
 		work.pop();
 		const auto depth = boom.depth;
@@ -2,27 +2,24 @@
 
 #include "mealy.hpp"
 
-#include <numeric>
 #include <type_traits>
 #include <vector>
 
+/*
+ * A splitting tree as defined in Lee & Yannakakis. The structure is also
+ * called a derivation tree in Knuutila. Both the classical Hopcroft algorithm
+ * and the Lee & Yannakakis algorithm produce splitting trees.
+ */
+
-struct splijtboom {
-	splijtboom(size_t N, size_t depth)
-	: states(N)
-	, depth(depth)
-	{
-		std::iota(begin(states), end(states), 0);
-	}
+struct splitting_tree {
+	splitting_tree(size_t N, size_t depth);
 
 	std::vector<state> states;
-	std::vector<splijtboom> children;
+	std::vector<splitting_tree> children;
 	std::vector<input> seperator;
 	size_t depth = 0;
 	mutable int mark = 0; // used for some algorithms...
 };
 
 template <typename Fun>
-void lca_impl1(splijtboom const & node, Fun && f){
+void lca_impl1(splitting_tree const & node, Fun && f){
 	node.mark = 0;
 	if(!node.children.empty()){
 		for(auto && c : node.children){
@@ -36,24 +33,57 @@ void lca_impl1(splijtboom const & node, Fun && f){
 	}
 }
 
-inline splijtboom & lca_impl2(splijtboom & node){
-	if(node.mark > 1) return node;
-	for(auto && c : node.children){
-		if(c.mark > 0) return lca_impl2(c);
-	}
-	return node; // this is a leaf
-}
+splitting_tree & lca_impl2(splitting_tree & node);
 
 template <typename Fun>
-splijtboom & lca(splijtboom & root, Fun && f){
+splitting_tree & lca(splitting_tree & root, Fun && f){
 	static_assert(std::is_same<decltype(f(0)), bool>::value, "f should return a bool");
 	lca_impl1(root, f);
 	return lca_impl2(root);
 }
 
 template <typename Fun>
-const splijtboom & lca(const splijtboom & root, Fun && f){
+const splitting_tree & lca(const splitting_tree & root, Fun && f){
 	static_assert(std::is_same<decltype(f(0)), bool>::value, "f should return a bool");
 	lca_impl1(root, f);
-	return lca_impl2(const_cast<splijtboom&>(root));
+	return lca_impl2(const_cast<splitting_tree&>(root));
 }
 
+
+/*
+ * The algorithm to create a splitting tree can be altered in some ways. This
+ * struct provides options to the algorithm. There are two common setups.
+ */
+
+struct options {
+	bool check_validity = true;
+	bool cache_succesors = true;
+};
+
+constexpr options lee_yannakakis_style{true, true};
+constexpr options hopcroft_style{false, false};
+
+
+/*
+ * The algorithm to create a splitting tree also produces some other useful
+ * data. This struct captures exactly that.
+ */
+
+struct result {
+	result(size_t N)
+	: root(N, 0)
+	, successor_cache()
+	, is_complete(true)
+	{}
+
+	// The splitting tree as described in Lee & Yannakakis
+	splitting_tree root;
+
+	// Encodes f_u : depth -> state -> state, where only the depth of u is of importance
+	std::vector<std::vector<state>> successor_cache;
+
+	// false <-> no adaptive distinguishing sequence
+	bool is_complete;
+};
+
+result create_splitting_tree(mealy const & m, options opt);
lib/transfer_sequences.cpp (new file, 42 lines)

@@ -0,0 +1,42 @@
+#include "transfer_sequences.hpp"
+
+#include "mealy.hpp"
+
+#include <queue>
+
+using namespace std;
+
+transfer_sequences create_transfer_sequences(const mealy& machine, state s){
+	vector<bool> visited(machine.graph_size, false);
+	vector<word> words(machine.graph_size);
+
+	queue<state> work;
+	work.push(s);
+	while(!work.empty()){
+		const auto u = work.front();
+		work.pop();
+
+		if(visited[u.base()]) continue;
+
+		visited[u.base()] = true;
+
+		for(input i = 0; i < machine.input_size; ++i){
+			const auto v = apply(machine, u, i).to;
+			if(visited[v.base()]) continue;
+
+			words[v.base()] = words[u.base()];
+			words[v.base()].push_back(i);
+			work.push(v);
+		}
+	}
+
+	return words;
+}
+
+std::vector<transfer_sequences> create_all_transfer_sequences(const mealy& machine){
+	vector<transfer_sequences> transfer_sequences(machine.graph_size);
+	for(state s = 0; s < machine.graph_size; ++s){
+		transfer_sequences[s.base()] = create_transfer_sequences(machine, s);
+	}
+	return transfer_sequences;
+}
lib/transfer_sequences.hpp (new file, 10 lines)

@@ -0,0 +1,10 @@
+#pragma once
+
+#include "types.hpp"
+
+struct mealy;
+
+using transfer_sequences = std::vector<word>;
+
+transfer_sequences create_transfer_sequences(mealy const & machine, state s);
+std::vector<transfer_sequences> create_all_transfer_sequences(mealy const & machine);
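Editor's note (not part of the diff): transfer sequences are the access prefixes of the test suite. A hedged sketch of how they combine with a separating family, mirroring the suite construction in src/main.cpp below; build_suite and its parameters are hypothetical names.

// Sketch only: prefix every state's separating suffixes with a transfer
// sequence from the initial state 0 to that state.
#include "transfer_sequences.hpp"
#include "seperating_family.hpp"
#include "mealy.hpp"

std::vector<word> build_suite(mealy const & machine, seperating_family const & family){
	const auto prefixes = create_transfer_sequences(machine, 0);

	std::vector<word> suite;
	for(state s = 0; s < machine.graph_size; ++s){
		for(auto && suffix : family[s.base()]){
			word test = prefixes[s.base()];
			test.insert(test.end(), suffix.begin(), suffix.end());
			suite.push_back(test);
		}
	}
	return suite;
}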
lib/types.hpp (new file, 14 lines)

@@ -0,0 +1,14 @@
+#pragma once
+
+#include "phantom.hpp"
+
+#include <vector>
+
+/* We use size_t's for easy indexing. But we do not want to mix states and
+ * inputs. We use phantom typing to "generate" distinguished types :).
+ */
+using state = phantom<size_t, struct state_tag>;
+using input = phantom<size_t, struct input_tag>;
+using output = phantom<size_t, struct output_tag>;
+
+using word = std::vector<input>;
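Editor's note (not part of the diff): the phantom tags keep state, input and output apart at compile time even though all three wrap a size_t. A small illustration; the function name is hypothetical and the rejected assignment is an assumption about phantom.hpp (not shown in this diff).

// Illustration only.
#include "types.hpp"

word single_input_word(input i){
	return word{i};     // fine: a word is a vector of inputs
}

// state s = input(3); // should not compile: different phantom tags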
@@ -1,5 +1,5 @@
 #include "write_tree_to_dot.hpp"
-#include "create_adaptive_distinguishing_sequence.hpp"
+#include "adaptive_distinguishing_sequence.hpp"
 #include "splitting_tree.hpp"
 
 #include <fstream>
@@ -17,8 +17,8 @@ ostream & operator<<(ostream& out, vector<T> const & x){
 }
 
 
-void write_splitting_tree_to_dot(const splijtboom& root, ostream& out){
-	write_tree_to_dot(root, [](const splijtboom & node, ostream& out){
+void write_splitting_tree_to_dot(const splitting_tree& root, ostream& out){
+	write_tree_to_dot(root, [](const splitting_tree & node, ostream& out){
 		out << node.states;
 		if(!node.seperator.empty()){
 			out << "\\n" << node.seperator;
@@ -26,13 +26,13 @@ void write_splitting_tree_to_dot(const splijtboom& root, ostream& out){
 	}, out);
 }
 
-void write_splitting_tree_to_dot(const splijtboom& root, const string& filename){
+void write_splitting_tree_to_dot(const splitting_tree& root, const string& filename){
 	ofstream file(filename);
 	write_splitting_tree_to_dot(root, file);
 }
 
-void write_adaptive_distinguishing_sequence_to_dot(const distinguishing_sequence & root, ostream & out){
-	write_tree_to_dot(root, [](const distinguishing_sequence & node, ostream& out){
+void write_adaptive_distinguishing_sequence_to_dot(const adaptive_distinguishing_sequence & root, ostream & out){
+	write_tree_to_dot(root, [](const adaptive_distinguishing_sequence & node, ostream& out){
 		if(!node.word.empty()){
 			out << node.word;
 		} else {
@@ -43,7 +43,7 @@ void write_adaptive_distinguishing_sequence_to_dot(const distinguishing_sequence & root, ostream & out){
 	}, out);
 }
 
-void write_adaptive_distinguishing_sequence_to_dot(const distinguishing_sequence & root, string const & filename){
+void write_adaptive_distinguishing_sequence_to_dot(const adaptive_distinguishing_sequence & root, string const & filename){
 	ofstream file(filename);
 	write_adaptive_distinguishing_sequence_to_dot(root, file);
 }
@@ -36,10 +36,10 @@ void write_tree_to_dot(const T & tree, NodeString && node_string, std::ostream &
 
 
 // Specialized printing for splitting trees and dist seqs
-struct splijtboom;
-void write_splitting_tree_to_dot(const splijtboom & root, std::ostream & out);
-void write_splitting_tree_to_dot(const splijtboom & root, std::string const & filename);
+struct splitting_tree;
+void write_splitting_tree_to_dot(const splitting_tree & root, std::ostream & out);
+void write_splitting_tree_to_dot(const splitting_tree & root, std::string const & filename);
 
-struct distinguishing_sequence;
-void write_adaptive_distinguishing_sequence_to_dot(const distinguishing_sequence & root, std::ostream & out);
-void write_adaptive_distinguishing_sequence_to_dot(const distinguishing_sequence & root, std::string const & filename);
+struct adaptive_distinguishing_sequence;
+void write_adaptive_distinguishing_sequence_to_dot(const adaptive_distinguishing_sequence & root, std::ostream & out);
+void write_adaptive_distinguishing_sequence_to_dot(const adaptive_distinguishing_sequence & root, std::string const & filename);
src/conf.cpp (100 changed lines)

@@ -1,9 +1,7 @@
 #include <mealy.hpp>
 #include <read_mealy_from_dot.hpp>
 
-#include <boost/iostreams/device/file_descriptor.hpp>
-#include <boost/iostreams/filtering_stream.hpp>
-#include <boost/iostreams/filter/gzip.hpp>
+#include <io.hpp>
 
 #include <fstream>
 #include <iostream>
@@ -11,40 +9,68 @@
 
 using namespace std;
 
-int main(int argc, char *argv[]){
-	if(argc != 3) return 37;
-
-	const string m_filename = argv[1];
-	const string c_filename = argv[2];
-
-	ifstream m_file(m_filename);
-	boost::iostreams::filtering_istream c_file;
-	c_file.push(boost::iostreams::gzip_decompressor());
-	c_file.push(boost::iostreams::file_descriptor_source(c_filename));
-
-	const auto machine = read_mealy_from_dot(m_file);
-
-	string in;
-	string out;
-
-	state s = 0;
-	size_t count = 0;
-	while(c_file >> in >> out){
-		const auto i = machine.input_indices.at(in);
-		const auto o = machine.output_indices.at(out);
-
-		const auto ret = apply(machine, s, i);
-		if(ret.output != o){
-			cout << "conformance fail" << endl;
-			cout << ret.output << " != " << o << endl;
-			cout << "at index " << count << endl;
-			return 1;
-		}
-
-		s = ret.to;
-		count++;
-	}
-
-	cout << "conformance succes " << count << endl;
-}
+template <typename T>
+vector<string> create_reverse_map(map<string, T> const & indices){
+	vector<string> ret(indices.size());
+	for(auto&& p : indices){
+		ret[p.second.base()] = p.first;
+	}
+	return ret;
+}
+
+int main(int argc, char *argv[]){
+	if(argc != 4) return 37;
+
+	const string spec_filename = argv[1];
+	const string impl_filename = argv[2];
+	const string suite_filename = argv[3];
+
+	ifstream spec_file(spec_filename);
+	ifstream impl_file(impl_filename);
+
+	boost::iostreams::filtering_istream suite_file;
+	suite_file.push(boost::iostreams::gzip_decompressor());
+	suite_file.push(boost::iostreams::file_descriptor_source(suite_filename));
+	boost::archive::text_iarchive archive(suite_file);
+
+	const auto spec = read_mealy_from_dot(spec_file);
+	const auto impl = read_mealy_from_dot(impl_file);
+
+	const auto spec_o_map = create_reverse_map(spec.output_indices);
+	const auto impl_o_map = create_reverse_map(impl.output_indices);
+
+	vector<vector<string>> suite;
+	archive >> suite;
+
+	size_t tcount = 0;
+	for(auto && test : suite){
+		state s = 0;
+		state t = 0;
+
+		size_t count = 0;
+		for(auto && i : test){
+			const auto i1 = spec.input_indices.at(i);
+			const auto r1 = apply(spec, s, i1);
+			const auto o1 = spec_o_map[r1.output.base()];
+			s = r1.to;
+
+			const auto i2 = spec.input_indices.at(i);
+			const auto r2 = apply(impl, t, i2);
+			const auto o2 = spec_o_map[r2.output.base()];
+			t = r2.to;
+
+			if(o1 != o2){
+				cout << "conformance fail" << endl;
+				cout << o1 << " != " << o2 << endl;
+				cout << "at test " << tcount << endl;
+				cout << "at char " << count << endl;
+				return 1;
+			}
+			count++;
+		}
+		tcount++;
+	}
+
+	cout << "conformance succes " << tcount << endl;
+}
src/main.cpp (303 changed lines)

@@ -1,20 +1,15 @@
-#include <create_adaptive_distinguishing_sequence.hpp>
-#include <create_splitting_tree.hpp>
+#include <adaptive_distinguishing_sequence.hpp>
 #include <logging.hpp>
 #include <mealy.hpp>
 #include <read_mealy_from_dot.hpp>
-#include <write_tree_to_dot.hpp>
+#include <seperating_family.hpp>
+#include <seperating_matrix.hpp>
+#include <splitting_tree.hpp>
+#include <transfer_sequences.hpp>
 
-#include <boost/iostreams/device/file_descriptor.hpp>
-#include <boost/iostreams/filter/gzip.hpp>
-#include <boost/iostreams/filtering_stream.hpp>
+#include <io.hpp>
 
 #include <cassert>
 #include <fstream>
 #include <functional>
 #include <iostream>
-#include <stack>
 #include <utility>
 #include <vector>
+#include <future>
 
 using namespace std;
@@ -27,31 +22,26 @@ vector<string> create_reverse_map(map<string, T> const & indices){
 	return ret;
 }
 
-auto bfs(Mealy const & machine, state s){
-	vector<bool> visited(machine.graph_size, false);
-	vector<vector<input>> words(machine.graph_size);
-
-	queue<state> work;
-	work.push(s);
-	while(!work.empty()){
-		const auto u = work.front();
-		work.pop();
-
-		if(visited[u.base()]) continue;
-
-		visited[u.base()] = true;
-
-		for(input i = 0; i < machine.input_size; ++i){
-			const auto v = apply(machine, u, i).to;
-			if(visited[v.base()]) continue;
-
-			words[v.base()] = words[u.base()];
-			words[v.base()].push_back(i);
-			work.push(v);
-		}
-	}
-
-	return words;
-}
+template <typename T>
+std::vector<T> concat(std::vector<T> const & l, std::vector<T> const & r){
+	std::vector<T> ret(l.size() + r.size());
+	auto it = copy(begin(l), end(l), begin(ret));
+	copy(begin(r), end(r), it);
+	return ret;
+}
+
+template <typename T>
+std::vector<std::vector<T>> all_seqs(T min, T max, std::vector<std::vector<T>> const & seqs){
+	std::vector<std::vector<T>> ret((max - min) * seqs.size());
+	auto it = begin(ret);
+	for(auto && x : seqs){
+		for(T i = min; i < max; ++i){
+			it->assign(x.size()+1);
+			auto e = copy(x.begin(), x.end(), it->begin());
+			*e++ = i;
+		}
+	}
+	return ret;
+}
 
 int main(int argc, char *argv[]){
@@ -63,218 +53,85 @@ int main(int argc, char *argv[]){
 		return read_mealy_from_dot(filename);
 	}();
 
-	const auto splitting_tree_hopcroft = [&]{
-		timer t("creating hopcroft splitting tree");
-		return create_splitting_tree(machine, without_validity_check);
-	}();
+	auto all_pair_seperating_sequences_fut = async([&]{
+		const auto splitting_tree_hopcroft = [&]{
+			timer t("creating hopcroft splitting tree");
+			return create_splitting_tree(machine, hopcroft_style);
+		}();
+
-	const auto all_pair_seperating_sequences = [&]{
-		timer t("gathering all seperating sequences");
-
-		vector<vector<vector<input>>> all_pair_seperating_sequences(machine.graph_size, vector<vector<input>>(machine.graph_size));
-
-		queue<reference_wrapper<const splijtboom>> work;
-		work.push(splitting_tree_hopcroft.root);
-
-		// total complexity is O(n^2), as we're visiting each pair only once :)
-		while(!work.empty()){
-			const splijtboom & node = work.front();
-			work.pop();
-
-			auto it = begin(node.children);
-			auto ed = end(node.children);
-
-			while(it != ed){
-				auto jt = next(it);
-				while(jt != ed){
-					for(auto && s : it->states){
-						for(auto && t : jt->states){
-							assert(all_pair_seperating_sequences[t.base()][s.base()].empty());
-							assert(all_pair_seperating_sequences[s.base()][t.base()].empty());
-							all_pair_seperating_sequences[t.base()][s.base()] = node.seperator;
-							all_pair_seperating_sequences[s.base()][t.base()] = node.seperator;
-						}
-					}
-					jt++;
-				}
-				it++;
-			}
-
-			for(auto && c : node.children){
-				work.push(c);
-			}
-		}
-
-		for(size_t i = 0; i < machine.graph_size; ++i){
-			for(size_t j = 0; j < machine.graph_size; ++j){
-				if(i == j) continue;
-				assert(!all_pair_seperating_sequences[i][j].empty());
-			}
-		}
+		const auto all_pair_seperating_sequences = [&]{
+			timer t("gathering all seperating sequences");
+			return create_all_pair_seperating_sequences(splitting_tree_hopcroft.root);
+		}();
 
 		return all_pair_seperating_sequences;
-	}();
+	});
 
-	const auto splitting_tree = [&]{
-		timer t("Lee & Yannakakis I");
-		return create_splitting_tree(machine, with_validity_check);
-	}();
+	auto sequence_fut = async([&]{
+		const auto splitting_tree = [&]{
+			timer t("Lee & Yannakakis I");
+			return create_splitting_tree(machine, lee_yannakakis_style);
+		}();
 
-	if(false){
-		timer t("writing splitting tree");
-		const string tree_filename = splitting_tree.is_complete ? (filename + ".splitting_tree") : (filename + ".incomplete_splitting_tree");
-		write_splitting_tree_to_dot(splitting_tree.root, tree_filename);
-	}
+		const auto sequence = [&]{
+			timer t("Lee & Yannakakis II");
+			return create_adaptive_distinguishing_sequence(splitting_tree);
+		}();
 
-	const auto sequence = [&]{
-		timer t("Lee & Yannakakis II");
-		return create_adaptive_distinguishing_sequence(splitting_tree);
-	}();
+		return sequence;
+	});
 
-	if(false){
-		timer t("writing dist sequence");
-		const string dseq_filename = splitting_tree.is_complete ? (filename + ".dist_seq") : (filename + ".incomplete_dist_seq");
-		write_adaptive_distinguishing_sequence_to_dot(sequence, dseq_filename);
-	}
+	auto transfer_sequences_fut = std::async([&]{
+		timer t("determining transfer sequences");
+		return create_transfer_sequences(machine, 0);
+	});
+
+	const auto all_pair_seperating_sequences = all_pair_seperating_sequences_fut.get();
+	const auto sequence = sequence_fut.get();
 
 	const auto seperating_family = [&]{
 		timer t("making seperating family");
-		using Word = vector<input>;
-		using SepSet = vector<Word>;
-		vector<SepSet> seperating_family(machine.graph_size);
-
-		stack<pair<vector<input>, reference_wrapper<const distinguishing_sequence>>> work;
-		work.push({{}, sequence});
-
-		while(!work.empty()){
-			auto word = work.top().first;
-			const distinguishing_sequence & node = work.top().second;
-			work.pop();
-
-			if(node.children.empty()){
-				// add sequence to this leave
-				for(auto && p : node.CI){
-					const auto state = p.second;
-					seperating_family[state.base()].push_back(word);
-				}
-
-				// if the leaf is not a singleton, we need the all_pair seperating seqs
-				for(auto && p : node.CI){
-					for(auto && q : node.CI){
-						const auto s = p.second;
-						const auto t = q.second;
-						if(s == t) continue;
-						seperating_family[s.base()].push_back(all_pair_seperating_sequences[s.base()][t.base()]);
-					}
-				}
-
-				continue;
-			}
-
-			for(auto && i : node.word)
-				word.push_back(i);
-
-			for(auto && c : node.children)
-				work.push({word, c});
-		}
-
-		return seperating_family;
+		return create_seperating_family(sequence, all_pair_seperating_sequences);
 	}();
 
+	const auto transfer_sequences = transfer_sequences_fut.get();
+
 	const auto inputs = create_reverse_map(machine.input_indices);
-	const auto outputs = create_reverse_map(machine.output_indices);
-	const auto print_uio = [&](auto const & word, auto & out, state s) -> auto & {
-		for(auto && i : word){
-			const auto o = apply(machine, s, i);
-			s = o.to;
-
-			out << inputs[i.base()] << ' ' << outputs[o.output.base()] << '\n';
-		}
-		return out;
-	};
+	{
+		timer t("making test suite");
+		vector<word> suite;
 
-	const auto transfer_sequences = [&]{
-		timer t("determining transfer sequences");
-		vector<vector<vector<input>>> transfer_sequences(machine.graph_size);
-		for(state s = 0; s < machine.graph_size; ++s){
-			transfer_sequences[s.base()] = bfs(machine, s);
-		}
-		return transfer_sequences;
-	}();
+		const auto prefix = transfer_sequences[s.base()];
 
-	const auto short_checking_seq = [&]{
-		timer t("making short checking seq");
-		vector<input> big_seq;
-		state from = 0;
-		for(state s = from; s < machine.graph_size; ++s){
-			for(const auto & seq : seperating_family[s.base()]){
-				copy(begin(seq), end(seq), back_inserter(big_seq));
-				from = apply(machine, s, begin(seq), end(seq)).to;
-
-				const auto to = s;
-				if(from == to) continue;
-
-				const auto transfer = transfer_sequences[from.base()][to.base()];
-				copy(begin(transfer), end(transfer), back_inserter(big_seq));
-			}
-
-			const auto to = s+1;
-			if(from == to) continue;
-
-			const auto transfer = transfer_sequences[from.base()][to.base()];
-			copy(begin(transfer), end(transfer), back_inserter(big_seq));
-		}
-
-		return big_seq;
-	}();
-
-	{
-		timer t("writing short checking seq");
-		const string uios_filename = filename + ".short_check_seq";
-
-		boost::iostreams::filtering_ostream out;
-		out.push(boost::iostreams::gzip_compressor());
-		out.push(boost::iostreams::file_descriptor_sink(uios_filename));
-
-		print_uio(short_checking_seq, out, 0);
-	}
-
-	const auto long_checking_seq = [&]{
-		timer t("making long checking seq");
-		vector<input> big_seq;
-		state from = 0;
-		for(state s = from; s < machine.graph_size; ++s){
-			for(input i = 0; i < machine.input_size; ++i){
-				const auto t = apply(machine, s, i).to;
-
-				for(auto && seq : seperating_family[t.base()]){
-					if(from != s){
-						const auto transfer = transfer_sequences[from.base()][s.base()];
-						copy(begin(transfer), end(transfer), back_inserter(big_seq));
-						from = s;
-					}
-
-					big_seq.push_back(i);
-					from = t;
-
-					copy(begin(seq), end(seq), back_inserter(big_seq));
-					from = apply(machine, from, begin(seq), end(seq)).to;
-				}
+		for(auto && suffix : seperating_family[s.base()]){
+			suite.push_back(concat(prefix, suffix));
+		}
 		}
 
-		return big_seq;
-	}();
+		vector<vector<string>> real_suite(suite.size());
+		transform(suite.begin(), suite.end(), real_suite.begin(), [&inputs](auto const & seq){
+			vector<string> seq2(seq.size());
+			transform(seq.begin(), seq.end(), seq2.begin(), [&inputs](auto const & i){
+				return inputs[i.base()];
+			});
+			return seq2;
+		});
 
 	{
 		timer t("writing long checking seq");
-		const string uios_filename = filename + ".full_check_seq";
+		// for(auto && test : real_suite) {
+		// 	for(auto && s : test) {
+		// 		cout << s << " ";
+		// 	}
+		// 	cout << endl;
+		// }
 
-		boost::iostreams::filtering_ostream out;
-		out.push(boost::iostreams::gzip_compressor());
-		out.push(boost::iostreams::file_descriptor_sink(uios_filename));
+		boost::iostreams::filtering_ostream compressed_stream;
+		compressed_stream.push(boost::iostreams::gzip_compressor());
+		compressed_stream.push(boost::iostreams::file_descriptor_sink("test_suite"));
 
-		print_uio(long_checking_seq, out, 0);
+		boost::archive::text_oarchive archive(compressed_stream);
+		archive << real_suite;
 	}
 	}
 
@@ -1,7 +1,6 @@
 #include <read_mealy_from_dot.hpp>
 #include <mealy.hpp>
 
 #include <string>
 #include <fstream>
 #include <iostream>
 