Index: /reasoner/evaluation.tex
===================================================================
--- /reasoner/evaluation.tex	(revision 257)
+++ /reasoner/evaluation.tex	(revision 258)
@@ -228,6 +228,7 @@
 \begin{itemize}
  \item reasoner 1.3.0 is faster and produces fewer constraints/re-evaluations although being more complete. 
+  \item reasoner 1.1.0 produces ineffective and less complex constraints that can be evaluated faster.
   \item Win 10 is slower, also Open JDK 10. 
-  \item \textbf{Strange differences between runs - summarization problem with linux?}
+  \item It seems that the models are structurally different on Linux than on Windows, i.e., different model complexity and constraint evaluation measures. This is strange, because VIL leads to the same code for QualiMaster; the reason is unclear. For creating the figures, we replaced the constraint metrics by the Windows values.
 \end{itemize}
 
@@ -261,5 +262,4 @@
 Figure \ref{fig:jenkins-1_30-ojdk10}:
 \begin{itemize}
-    \item less spiky than windows
     \item \textbf{data missing?}
 \end{itemize}
@@ -267,6 +267,7 @@
 Figure \ref{fig:pi-1_10-jdk8}:
 \begin{itemize}
-    \item less spiky than windows and jenkins
     \item factor 60 slower
+    \item evaluation speed
+    \item no time difference for larger models
     \item as version 1.1.0, 200+200 vs 30.000? output problems
     \item \textbf{data missing?}
@@ -275,5 +276,6 @@
 Figure \ref{fig:pi-1_30-jdk8}:
 \begin{itemize}
-    \item again less spiky than windows and jenkins
+    \item less spiky than windows and jenkins
+    \item no time difference for larger models
     \item factor 13 slower (ICPE paper)
     \item times sum up
Index: /reasoner/measures/log.txt
===================================================================
--- /reasoner/measures/log.txt	(revision 257)
+++ /reasoner/measures/log.txt	(revision 258)
@@ -182,10 +182,10 @@
 [1] " translation time diagram all"
 [1] " evaluation time diagram all"
-[1] "#tests;416"
-[1] "reasoning time;0.0285714285714286;920.879407051282;28460.2857142857"
-[1] "translation time;0;13.8444482600733;242.2"
-[1] "evaluation time;0;798.885966117216;26417.3428571429"
-[1] "constraints count;0;941.034855769231;24110"
-[1] "reevaluation count;0;1615.95147664835;46487.8571428571"
+[1] "#tests;478"
+[1] "reasoning time;0.0285714285714286;2790.09086471409;28689.8571428571"
+[1] "translation time;0;38.8088563458856;279.428571428571"
+[1] "evaluation time;0;2433.89591552102;26623.8571428571"
+[1] "constraints count;0;2659.4550209205;24110"
+[1] "reevaluation count;0;4696.50678421996;46492.7142857143"
 [1] "processing folder  W:/offlineFiles/EASy-concepts/reasoner/measures/benchmark-results-pi-1_3_0-jdk1_8-20190219"
 [1] " loading data (1)"
@@ -203,8 +203,8 @@
 [1] " translation time diagram all"
 [1] " evaluation time diagram all"
-[1] "#tests;454"
-[1] "reasoning time;0.0571428571428571;244.176431718062;6107.45714285714"
-[1] "translation time;0;100.193953220055;2782.71428571429"
-[1] "evaluation time;0;74.8047566603734;2528.48571428571"
-[1] "constraints count;0;50.5374449339207;6000"
-[1] "reevaluation count;0;779.189175582127;23702.2285714286"
+[1] "#tests;544"
+[1] "reasoning time;0.0571428571428571;671.583193277311;6285.85714285714"
+[1] "translation time;0;276.622168242297;3041.85714285714"
+[1] "evaluation time;0;199.457269782913;2626.71428571429"
+[1] "constraints count;0;42.1764705882353;6000"
+[1] "reevaluation count;0;2032.48671218487;23708.2857142857"
Index: /reasoner/measures/script.r
===================================================================
--- /reasoner/measures/script.r	(revision 257)
+++ /reasoner/measures/script.r	(revision 258)
@@ -20,6 +20,6 @@
 #tag replacement (happens before skipping)
 replaceTags <- data.frame(
-  c("REASONING 1", "REASONING 2", "SCENARIO-INC",   "SCENARIO-INST",  "IREASONING"), 
-  c("REASONING",   "REASONING",   "SCENARIO (inc)", "SCENARIO (rt)",  "REASONING (inc)"))
+  c("REASONING 1", "REASONING 2", "SCENARIO-INC",   "SCENARIO-INST",  "IREASONING",      "RT-VIL"), 
+  c("REASONING",   "REASONING",   "SCENARIO (inc)", "SCENARIO (rt)",  "REASONING (inc)", "REASONING (rt)"))
 names(replaceTags) <- c("orig", "subst")
 
@@ -175,5 +175,5 @@
     theme(legend.position=legendPos)
   if (regression) {
-    gg <- gg + geom_smooth(method=lm, linetype = "dashed")
+    gg <- gg + geom_smooth(method=lm, linetype = "dotted")
   }
   return (gg)
@@ -224,5 +224,5 @@
 # creates the diagrams for a data set to be written into dir using the given indicator (name)
 my.createDiagrams = function(data, dir, indicator) {
-  print(paste(" reasoning time diagram", indicator))
+  write(paste(" reasoning time diagram", indicator), stderr())
   gg <-
     my.createErrorBarDiagram(
@@ -236,5 +236,5 @@
     plot=gg)
   
-  print(paste(" translation time diagram", indicator))
+  write(paste(" translation time diagram", indicator), stderr())
   gg <- my.createErrorBarDiagram(
     data, 
@@ -247,5 +247,5 @@
     plot=gg)
 
-  print(paste(" evaluation time diagram", indicator))
+  write(paste(" evaluation time diagram", indicator), stderr())
   gg <- my.createErrorBarDiagram(
     data, 
@@ -266,9 +266,10 @@
   for (d in dirs) {
     if (!is.na(str_match(d, "benchmark-results.*"))) {
+      write(paste("processing folder ", d), stderr())
       print(paste("processing folder ", d))
-      print(" loading data (1)")
+      write(" loading data (1)", stderr())
       data <- my.readData(d, 1)
       my.createDiagrams(data, d, 1)
-      print(" loading data (all)")
+      write(" loading data (all)", stderr())
       data <- my.readData(d)
       my.createDiagrams(data, d, "all")
