mirror of https://git.cs.ou.nl/joshua.moerman/mealy-decompose.git synced 2025-04-30 02:07:44 +02:00

Changes the script so it does all the things I did manually first

Joshua Moerman 2023-12-08 20:17:07 +01:00
parent a20e5e9317
commit 586d01bceb
3 changed files with 101 additions and 69 deletions

View file

@@ -5,14 +5,14 @@ import Mealy
import MealyRefine
import Partition
import Control.Monad (forM_, when, replicateM)
import Control.Monad.IO.Class (liftIO)
import Control.Monad.Trans.State.Strict
import Control.Monad (forM_, when, forever)
import Data.Function (on)
import Data.List (minimumBy, maximum, sort, intercalate)
import Data.Map.Strict qualified as Map
import Data.Maybe (mapMaybe)
import Data.List.Ordered (nubSort)
import Data.List (minimumBy)
import Data.Function (on)
import Data.Set qualified as Set
import System.Environment
import Text.Megaparsec
@@ -41,7 +41,7 @@ main = do
print . take 4 . inputs $ machine
print . take 4 . outputs $ machine
-- DEBUG OUTPUT
-- -- DEBUG OUTPUT
-- forM_ (states machine) (\s -> do
-- print s
-- forM_ (inputs machine) (\i -> do
@@ -70,60 +70,31 @@ main = do
printPartition partition
)
{-
let totalSize = sum (fmap (numBlocks . snd) projections)
putStrLn $ "total size = " <> show totalSize
let score p1 p2 p3 = numBlocks p3 - numBlocks p2 - numBlocks p1
combine (o1, p1) (o2, p2) = let p3 = commonRefinement p1 p2 in ((o1, o2, p3), score p1 p2 p3)
allCombs projs = [combine op1 op2 | op1 <- projs, op2 <- projs, fst op1 < fst op2]
minComb projs = minimumBy (compare `on` snd) (allCombs projs)
_ <- flip execStateT (Map.fromList projections, totalSize) $ forever (do
(projmap, currentSize) <- get
liftIO . print . fmap numBlocks . Map.elems $ projmap
let ((o1, o2, p3), gain) = minComb (Map.assocs projmap)
o3 = o1 <> "x" <> o2
newSize = currentSize + gain
newProjmap = Map.insert o3 p3 . Map.delete o2 . Map.delete o1 $ projmap
liftIO $ putStrLn (show o3 <> " -> " <> show newSize)
put (newProjmap, newSize)
)
print "done"
-}
{-
-- Check refinement relations for all pairs
-- This is a bit messy: it skips machines which are equivalent
-- to earlier checked machines, so we thread some state through this
-- computation. (And I also want some IO for debugging purposes.)
(equiv, rel) <- flip execStateT (Map.empty, []) $ do
forM_ projections (\(o1, b1) -> do
(repr, _) <- get
if o1 `Map.member` repr
then do
liftIO . putStrLn $ "skip " <> (show o1) <> " |-> " <> (show (repr Map.! o1))
else do
liftIO $ print o1
forM_ projections (\(o2, b2) -> do
(repr0, _) <- get
when (o1 < o2 && o2 `Map.notMember` repr0) $ do
case comparePartitions b1 b2 of
Equivalent -> do
(repr, ls) <- get
put (Map.insert o2 o1 repr, ls)
Refinement -> do
(repr, ls) <- get
put (repr, (o1, o2):ls)
Coarsening -> do
(repr, ls) <- get
put (repr, (o2, o1):ls)
Incomparable -> return ()
)
)
-- First we check equivalent partitions, so that we only work on one
-- item in each equivalence class. This could be merged with the next
-- phase of checking refinement, and that would be faster. But this is
-- simpler.
let checkRelsFor o1 p1 =
forM_ projections (\(o2, p2) -> do
(repr, ls) <- get
-- We skip if o2 is equivalent to an earlier o
when (o1 < o2 && o2 `Map.notMember` repr) $ do
case isEquivalent p1 p2 of
-- Equivalent: let o2 point to o1
True -> put (Map.insert o2 o1 repr, ls)
False -> return ()
)
checkAllRels projs =
forM_ projs (\(o1, p1) -> do
-- First we check if o1 is equivalent to an earlier o
-- If so, we skip it. Else, we add it to the unique
-- components and compare to all others.
(repr, ls) <- get
when (o1 `Map.notMember` repr) $ do
put (repr, (o1, p1):ls)
checkRelsFor o1 p1
)
(equiv, uniqPartitions) = execState (checkAllRels projections) (Map.empty, [])
putStrLn ""
putStrLn "Equivalences"
@@ -131,12 +102,74 @@ main = do
putStrLn $ " " <> (show o2) <> " == " <> (show o1)
)
let cleanRel = [(Map.findWithDefault o1 o1 equiv, Map.findWithDefault o2 o2 equiv) | (o1, o2) <- rel]
-- Then we compare each pair of partitions. If one is a coarsening of
-- another, we can skip it later on. That is to say: we only want the
-- finest partitions.
let compareAll partitions =
forM_ partitions (\(o1, b1) ->
forM_ partitions (\(o2, b2) ->
when (o1 < o2) $ do
ls <- get
case comparePartitions b1 b2 of
Equivalent -> error "cannot happen"
Refinement -> put $ (o1, o2):ls
Coarsening -> put $ (o2, o1):ls
Incomparable -> return ()
)
)
rel = execState (compareAll uniqPartitions) []
putStrLn ""
putStrLn "Relation (smaller points to bigger)"
forM_ (nubSort cleanRel) (\(o1, o2) -> do
putStrLn "Relation, coarser points to finer (bigger)"
forM_ rel (\(o1, o2) -> do
putStrLn $ " " <> (show o2) <> " -> " <> (show o1)
)
-- Get rid of the coarser partitions
let lowElements = Set.fromList . fmap snd $ rel
allElements = Set.fromList . fmap fst $ uniqPartitions
topElements = Set.difference allElements lowElements
mods = Map.fromList uniqPartitions -- could be a lazy map
topMods = Map.assocs $ Map.restrictKeys mods topElements
foo (a, b) = (numBlocks b, a)
putStrLn ""
putStrLn "Top modules"
forM_ (reverse . sort . fmap foo $ topMods) (\(b, o) -> do
putStrLn $ " " <> (show o) <> " has size " <> (show b)
)
-- Now let's combine components to minimise the total size
let totalSize = sum (fmap (numBlocks . snd) topMods)
putStrLn ""
putStrLn $ "num = " <> show (length topMods) <> ", size = " <> show totalSize
let score ps p3 = numBlocks p3 - sum (fmap numBlocks ps)
combine ops = let os = fmap fst ops
ps = fmap snd ops
p3 = foldr1 commonRefinement ps
in ((os, p3), score ps p3)
isSortedOn f ls = and $ zipWith (\a b -> f a < f b) ls (drop 1 ls)
allCombs n projs = fmap combine . filter (isSortedOn fst) $ replicateM n projs
minComb n projs = minimumBy (compare `on` snd) (allCombs n projs)
let loop 1 = return ()
loop n = do
(projmap, currentSize) <- get
-- liftIO . print . fmap numBlocks . Map.elems $ projmap
let ((os, p3), gain) = minComb n (Map.assocs projmap)
o3 = intercalate "x" os
newSize = currentSize + gain
newProjmap = Map.insert o3 p3 . Map.withoutKeys projmap $ Set.fromList os
liftIO . putStrLn $ show o3 <> " -> num = " <> show (Map.size newProjmap) <> ", size = " <> show newSize <> ", max = " <> show (maximum . fmap numBlocks . Map.elems $ newProjmap)
put (newProjmap, newSize)
if Map.size newProjmap < n
then loop (Map.size newProjmap)
else loop n
putStrLn "2"
_ <- execStateT (loop 2) (Map.fromList topMods, totalSize)
return ()
-}
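For intuition on the scoring used by score/combine/minComb in the loop above, here is a minimal sketch with invented block sizes (exampleGain and scoreSizes are not from the repository):

-- Merging components with 5 and 7 blocks into a common refinement of 9 blocks
-- scores 9 - (5 + 7) = -3, i.e. the merged component is 3 blocks smaller than
-- the two kept separately; minComb picks the smallest (most negative) score.
exampleGain :: Int
exampleGain = scoreSizes [5, 7] 9  -- evaluates to -3
  where
    scoreSizes sizes combined = combined - sum sizes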

View file

@@ -28,16 +28,17 @@ type Trans = (Stat, Stat, Input, Output)
type Parser = Parsec Void String
parseTrans :: Parser Trans
parseTrans = assoc <$> identifier <* symbol "->" <*> identifier <*> brackets parseLabel <* optional (symbol ";")
parseTrans = assoc <$> identifierQ <* symbol "->" <*> identifierQ <*> brackets parseLabel <* optional (symbol ";")
where
-- defines whitespace and lexemes
sc = L.space space1 empty empty
lexeme = L.lexeme sc
symbol = L.symbol sc
-- state, input, output is any string of alphaNumChar's
isAlphaNumExtra c = isAlphaNum c || c == '.' || c == '-'
isAlphaNumExtra c = isAlphaNum c || c == '_' || c == '+' || c == '.' || c == ',' || c == '-' || c == '(' || c == ')'
alphaNumCharExtra = satisfy isAlphaNumExtra <?> "alphanumeric character or extra"
identifier = lexeme (some alphaNumCharExtra)
identifierQ = identifier <|> between (symbol "\"") (symbol "\"") identifier
-- The label has the shape [label="i/o"]
brackets = between (symbol "[") (symbol "]")
parseLabel = (,) <$> (symbol "label=\"" *> identifier <* symbol "/") <*> (identifier <* symbol "\"")
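A usage sketch for the widened grammar (the sample transition line and the name exampleParse are invented; it assumes the module's existing Megaparsec imports):

-- Hypothetical input that parseTrans accepts after this change: quoted state
-- names plus identifiers containing _ + . , - ( ).
exampleParse :: Either (ParseErrorBundle String Void) Trans
exampleParse = parse parseTrans "<sample>"
  "\"state(0)\" -> \"state(1)\" [label=\"button_1/led+on\"];"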

View file

@@ -31,13 +31,11 @@ commonRefinement p1 p2 =
-- stopping early. This is already much faster than what is in
-- the CoPaR library, so I won't bother.
isRefinementOf2 :: Partition -> Partition -> Bool
isRefinementOf2 refined original =
numBlocks refined == numBlocks (commonRefinement refined original)
isRefinementOf2 refined original = comparePartitions refined original == Refinement
-- See comment at isRefinementOf2
isEquivalent :: Partition -> Partition -> Bool
isEquivalent p1 p2 =
p1 == p2 || (numBlocks p1 == numBlocks p2 && numBlocks p1 == numBlocks (commonRefinement p1 p2))
isEquivalent p1 p2 = comparePartitions p1 p2 == Equivalent
-- Instead of checking whether one partition is a refinement of another and
-- also checking vice versa, we can check the direction at once, computing the
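comparePartitions and its result type are outside this hunk; as a rough sketch (the names ComparisonSketch and comparePartitionsSketch are made up, and the bodies are an assumption rather than the repository code), both directions can be read off a single common refinement, giving the four cases matched on in the script above:

-- Sketch only: p1 refines p2 exactly when their common refinement has as many
-- blocks as p1, and symmetrically for the other direction.
data ComparisonSketch = Equivalent | Refinement | Coarsening | Incomparable
  deriving (Eq, Show)

comparePartitionsSketch :: Partition -> Partition -> ComparisonSketch
comparePartitionsSketch p1 p2 =
  case (numBlocks p1 == common, numBlocks p2 == common) of
    (True, True)   -> Equivalent    -- each refines the other
    (True, False)  -> Refinement    -- p1 strictly refines p2
    (False, True)  -> Coarsening    -- p1 is strictly coarser than p2
    (False, False) -> Incomparable
  where
    common = numBlocks (commonRefinement p1 p2)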