diff --git a/.gitignore b/.gitignore
index c1f7655373b7..a8ebdb4a397f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -73,6 +73,7 @@ LLVM/IR2Vec-Engine/IR2Vec-Binaries/*
 !LLVM/IR2Vec-Engine/IR2Vec-Binaries/README.md
 # Data
 /data
+install
 
 # trained model
 trained_model/
diff --git a/MLCompilerBridge b/MLCompilerBridge
index 253f31ac1a67..53bf989f6e34 160000
--- a/MLCompilerBridge
+++ b/MLCompilerBridge
@@ -1 +1 @@
-Subproject commit 253f31ac1a678017ce1b217ffb6f92d25d501325
+Subproject commit 53bf989f6e34125a2be6dd6b832cb852fb6922ce
diff --git a/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp b/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
index 556c7f03ba0b..12332aaf936f 100644
--- a/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
@@ -129,7 +129,7 @@ static json::Object createArtifactLocation(const FileEntry &FE,
   auto I = llvm::find_if(Artifacts, [&](const json::Value &File) {
     if (const json::Object *Obj = File.getAsObject()) {
       if (const json::Object *FileLoc = Obj->getObject("location")) {
-        std::optional<StringRef> URI = FileLoc->getString("uri");
+        Optional<StringRef> URI = FileLoc->getString("uri");
         return URI && URI->equals(FileURI);
       }
     }
diff --git a/llvm/include/llvm/CodeGen/MLRegAlloc.h b/llvm/include/llvm/CodeGen/MLRegAlloc.h
index 94ef6547f22c..6b30256493ef 100644
--- a/llvm/include/llvm/CodeGen/MLRegAlloc.h
+++ b/llvm/include/llvm/CodeGen/MLRegAlloc.h
@@ -5,10 +5,13 @@
 #include "InterferenceCache.h"
 #include "LiveDebugVariables.h"
 #include "MLModelRunner/MLModelRunner.h"
+#include "MLModelRunner/ONNXModelRunner/onnx.h"
 #include "RegAllocBase.h"
 #include "SpillPlacement.h"
 #include "Spiller.h"
 #include "SplitKit.h"
+#include "grpc/RegisterAllocationInference/RegisterAllocationInference.pb.h"
+#include "multi_agent_env.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/BitVector.h"
 #include "llvm/ADT/DenseMap.h"
@@ -64,9 +67,6 @@
 #include "llvm/Support/Timer.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Target/TargetMachine.h"
-#include "MLModelRunner/ONNXModelRunner/onnx.h"
-#include "grpc/RegisterAllocationInference/RegisterAllocationInference.pb.h"
-#include "multi_agent_env.h"
 // gRPC includes
 #include "grpc/RegisterAllocation/RegisterAllocation.grpc.pb.h"
 #include "grpc/RegisterAllocationInference/RegisterAllocationInference.grpc.pb.h"
@@ -82,11 +82,10 @@
 #include <memory>
 #include <set>
 #include <sstream>
+#include <string>
 #include <tuple>
 #include <utility>
 #include <vector>
-#include <string>
-#include <map>
 
 #define DEBUG_TYPE "mlra-regalloc"
 
@@ -187,6 +186,7 @@ class MLRA : public RegAllocBase,
   registerallocationinference::Data ServerModeRequest;
   registerallocationinference::Data ClientModeResponse;
   registerallocationinference::RegisterProfileList ClientModeRequest;
+
 private:
   struct PipeResponse {
     std::string Action;
@@ -210,10 +210,10 @@ class MLRA : public RegAllocBase,
   // };
   // SmallMapVector<unsigned, RegisterProfile, 16> regProfMap;
   // RegisterProfileMap regProfMap;
-  std::unique_ptr<MLModelRunner> MLRunner;
+  // std::unique_ptr<MLModelRunner> MLRunner;
   json::Object JO;
   std::vector<TensorSpec> FeatureSpecs;
-  std::vector<void*> InputBuffers;
+  std::vector<void *> InputBuffers;
   SmallSetVector<unsigned, 8> regIdxs;
   // TensorSpec AdviceSpec;
   bool CommuResult;
@@ -277,11 +277,19 @@ class MLRA : public RegAllocBase,
   //
   unsigned getPhyRegForColor(LiveInterval &VirtReg, unsigned color,
                              SmallVector<unsigned, 4> &SplitVRegs);
-            
+
   Observation split_node_step(unsigned action) override;
-  void initPipeCommunication();
-  void processMLInputs(SmallSetVector<unsigned, 8> *updatedRegIdxs, bool IsStart = false, bool IsJson=false);
-  void processMLInputsProtobuf(SmallSetVector<unsigned, 8> *updatedRegIdxs, bool IsStart = false);
+
+  template <typename T> void initCommunication(T &);
+
+  template <typename T>
+  void processMLInputs(T &MLRunner, SmallSetVector<unsigned, 8> *updatedRegIdxs,
+                       bool IsStart = false, bool IsJson = false);
+
+  template <typename T>
+  void processMLInputsProtobuf(T &MLRunner,
+                               SmallSetVector<unsigned, 8> *updatedRegIdxs,
+                               bool IsStart = false);
   void printFeatures();
   // void processMLAdvice();
 
diff --git a/llvm/include/llvm/LinkAllPasses.h b/llvm/include/llvm/LinkAllPasses.h
index 0efb2beec59a..5433826bf8fe 100644
--- a/llvm/include/llvm/LinkAllPasses.h
+++ b/llvm/include/llvm/LinkAllPasses.h
@@ -236,8 +236,8 @@ namespace {
       (void) llvm::createHardwareLoopsPass();
       (void)llvm::createInjectTLIMappingsLegacyPass();
       (void) llvm::createcustom_loop_distributionPass();
-      // (void)llvm::createRDGWrapperPass();
-      // (void) llvm::createLoopDistributionWrapperPassPass();
+      (void)llvm::createRDGWrapperPass();
+      (void) llvm::createLoopDistributionWrapperPassPass();
 
       (void)new llvm::IntervalPartition();
       (void)new llvm::ScalarEvolutionWrapperPass();
diff --git a/llvm/include/llvm/Support/JSON.h b/llvm/include/llvm/Support/JSON.h
index 9d204faabbfa..abccb1cdf840 100644
--- a/llvm/include/llvm/Support/JSON.h
+++ b/llvm/include/llvm/Support/JSON.h
@@ -90,7 +90,7 @@ std::string fixUTF8(llvm::StringRef S);
 class Array;
 class ObjectKey;
 class Value;
-template <typename T> Value toJSON(const std::optional<T> &Opt);
+template <typename T> Value toJSON(const llvm::Optional<T> &Opt);
 
 /// An Object is a JSON object, which maps strings to heterogenous JSON values.
 /// It simulates DenseMap<ObjectKey, Value>. ObjectKey is a maybe-owned string.
@@ -140,14 +140,14 @@ class Object {
   // Look up a property, returning nullptr if it doesn't exist.
   Value *get(StringRef K);
   const Value *get(StringRef K) const;
-  // Typed accessors return std::nullopt/nullptr if
+  // Typed accessors return llvm::None/nullptr if
   //   - the property doesn't exist
   //   - or it has the wrong type
-  std::optional<std::nullptr_t> getNull(StringRef K) const;
-  std::optional<bool> getBoolean(StringRef K) const;
-  std::optional<double> getNumber(StringRef K) const;
-  std::optional<int64_t> getInteger(StringRef K) const;
-  std::optional<llvm::StringRef> getString(StringRef K) const;
+  llvm::Optional<std::nullptr_t> getNull(StringRef K) const;
+  llvm::Optional<bool> getBoolean(StringRef K) const;
+  llvm::Optional<double> getNumber(StringRef K) const;
+  llvm::Optional<int64_t> getInteger(StringRef K) const;
+  llvm::Optional<llvm::StringRef> getString(StringRef K) const;
   const json::Object *getObject(StringRef K) const;
   json::Object *getObject(StringRef K);
   const json::Array *getArray(StringRef K) const;
@@ -238,14 +238,14 @@ inline bool operator!=(const Array &L, const Array &R) { return !(L == R); }
 ///   object  (json::Object)
 ///
 /// The kind can be queried directly, or implicitly via the typed accessors:
-///   if (std::optional<StringRef> S = E.getAsString()
+///   if (llvm::Optional<StringRef> S = E.getAsString()
 ///     assert(E.kind() == Value::String);
 ///
 /// Array and Object also have typed indexing accessors for easy traversal:
 ///   Expected<Value> E = parse(R"( {"options": {"font": "sans-serif"}} )");
 ///   if (Object* O = E->getAsObject())
 ///     if (Object* Opts = O->getObject("options"))
-///       if (std::optional<StringRef> Font = Opts->getString("font"))
+///       if (llvm::Optional<StringRef> Font = Opts->getString("font"))
 ///         assert(Opts->at("font").kind() == Value::String);
 ///
 /// === Converting JSON values to C++ types ===
@@ -266,13 +266,13 @@ inline bool operator!=(const Array &L, const Array &R) { return !(L == R); }
 ///   - std::string
 ///   - vector<T>, where T is deserializable
 ///   - map<string, T>, where T is deserializable
-///   - std::optional<T>, where T is deserializable
+///   - llvm::Optional<T>, where T is deserializable
 /// ObjectMapper can help writing fromJSON() functions for object types.
 ///
 /// For conversion in the other direction, the serializer function is:
 ///    toJSON(const T&) -> json::Value
 /// If this exists, then it also allows constructing Value from T, and can
-/// be used to serialize vector<T>, map<string, T>, and std::optional<T>.
+/// be used to serialize vector<T>, map<string, T>, and llvm::Optional<T>.
 ///
 /// === Serialization ===
 ///
@@ -401,29 +401,29 @@ class Value {
     llvm_unreachable("Unknown kind");
   }
 
-  // Typed accessors return std::nullopt/nullptr if the Value is not of this
+  // Typed accessors return llvm::None/nullptr if the Value is not of this
   // type.
-  std::optional<std::nullptr_t> getAsNull() const {
+  llvm::Optional<std::nullptr_t> getAsNull() const {
     if (LLVM_LIKELY(Type == T_Null))
       return nullptr;
-    return std::nullopt;
+    return llvm::None;
   }
-  std::optional<bool> getAsBoolean() const {
+  llvm::Optional<bool> getAsBoolean() const {
     if (LLVM_LIKELY(Type == T_Boolean))
       return as<bool>();
-    return std::nullopt;
+    return llvm::None;
   }
-  std::optional<double> getAsNumber() const {
+  llvm::Optional<double> getAsNumber() const {
     if (LLVM_LIKELY(Type == T_Double))
       return as<double>();
     if (LLVM_LIKELY(Type == T_Integer))
       return as<int64_t>();
     if (LLVM_LIKELY(Type == T_UINT64))
       return as<uint64_t>();
-    return std::nullopt;
+    return llvm::None;
   }
   // Succeeds if the Value is a Number, and exactly representable as int64_t.
-  std::optional<int64_t> getAsInteger() const {
+  llvm::Optional<int64_t> getAsInteger() const {
     if (LLVM_LIKELY(Type == T_Integer))
       return as<int64_t>();
     if (LLVM_LIKELY(Type == T_UINT64)) {
@@ -439,9 +439,9 @@ class Value {
                       D <= double(std::numeric_limits<int64_t>::max())))
         return D;
     }
-    return std::nullopt;
+    return llvm::None;
   }
-  std::optional<uint64_t> getAsUINT64() const {
+  llvm::Optional<uint64_t> getAsUINT64() const {
     if (Type == T_UINT64)
       return as<uint64_t>();
     else if (Type == T_Integer) {
@@ -449,14 +449,14 @@ class Value {
       if (N >= 0)
         return as<uint64_t>();
     }
-    return std::nullopt;
+    return llvm::None;
   }
-  std::optional<llvm::StringRef> getAsString() const {
+  llvm::Optional<llvm::StringRef> getAsString() const {
     if (Type == T_String)
       return llvm::StringRef(as<std::string>());
     if (LLVM_LIKELY(Type == T_StringRef))
       return as<llvm::StringRef>();
-    return std::nullopt;
+    return llvm::None;
   }
   const json::Object *getAsObject() const {
     return LLVM_LIKELY(Type == T_Object) ? &as<json::Object>() : nullptr;
@@ -637,6 +637,7 @@ inline bool Object::erase(StringRef K) {
 /// A "cursor" marking a position within a Value.
 /// The Value is a tree, and this is the path from the root to the current node.
 /// This is used to associate errors with particular subobjects.
+class Path;
 class Path {
 public:
   class Root;
@@ -770,9 +771,9 @@ inline bool fromJSON(const Value &E, std::nullptr_t &Out, Path P) {
   return false;
 }
 template <typename T>
-bool fromJSON(const Value &E, std::optional<T> &Out, Path P) {
+bool fromJSON(const Value &E, llvm::Optional<T> &Out, Path P) {
   if (E.getAsNull()) {
-    Out = std::nullopt;
+    Out = None;
     return true;
   }
   T Result = {};
@@ -808,8 +809,8 @@ bool fromJSON(const Value &E, std::map<std::string, T> &Out, Path P) {
   return false;
 }
 
-// Allow serialization of std::optional<T> for supported T.
-template <typename T> Value toJSON(const std::optional<T> &Opt) {
+// Allow serialization of llvm::Optional<T> for supported T.
+template <typename T> Value toJSON(const llvm::Optional<T> &Opt) {
   return Opt ? Value(*Opt) : Value(nullptr);
 }
 
@@ -848,12 +849,12 @@ class ObjectMapper {
 
   /// Maps a property to a field, if it exists.
   /// If the property exists and is invalid, reports an error.
-  /// (Optional requires special handling, because missing keys are OK).
-  template <typename T> bool map(StringLiteral Prop, std::optional<T> &Out) {
+  /// (llvm::Optional requires special handling, because missing keys are OK).
+  template <typename T> bool map(StringLiteral Prop, llvm::Optional<T> &Out) {
     assert(*this && "Must check this is an object before calling map()");
     if (const Value *E = O->get(Prop))
       return fromJSON(*E, Out, P.field(Prop));
-    Out = std::nullopt;
+    Out = llvm::None;
     return true;
   }
 
diff --git a/llvm/include/llvm/Transforms/Scalar/IR2Vec-LOF/IR2Vec-SCC.h b/llvm/include/llvm/Transforms/Scalar/IR2Vec-LOF/IR2Vec-SCC.h
index 35dc213b720b..49509c14c59c 100644
--- a/llvm/include/llvm/Transforms/Scalar/IR2Vec-LOF/IR2Vec-SCC.h
+++ b/llvm/include/llvm/Transforms/Scalar/IR2Vec-LOF/IR2Vec-SCC.h
@@ -78,7 +78,7 @@ class RDGWrapperPass : public FunctionPass {
                        DOTData &rdg);
 };
 
-// RDGWrapperPass *createRDGWrapperPass();
+RDGWrapperPass *createRDGWrapperPass();
 
 } // namespace llvm
 
diff --git a/llvm/include/llvm/Transforms/Scalar/IR2Vec-LOF/LoopDistribution.h b/llvm/include/llvm/Transforms/Scalar/IR2Vec-LOF/LoopDistribution.h
index 3df737d56b24..088919647e24 100644
--- a/llvm/include/llvm/Transforms/Scalar/IR2Vec-LOF/LoopDistribution.h
+++ b/llvm/include/llvm/Transforms/Scalar/IR2Vec-LOF/LoopDistribution.h
@@ -74,14 +74,14 @@ class LoopDistribution {
   void setLid(unsigned int lid) { this->lid = lid; }
   void setPartition(std::string partition) { this->partition = partition; }
   
-  void computeDistribution(SmallVector<DataDependenceGraph *, 5> &SCCGraphs,
-                           SmallVector<Loop *, 5> &loops,
-                           SmallVector<std::string, 5> &dis_seqs);
+  // void computeDistribution(SmallVector<DataDependenceGraph *, 5> &SCCGraphs,
+  //                          SmallVector<Loop *, 5> &loops,
+  //                          SmallVector<std::string, 5> &dis_seqs);
 
-  void run(Function &F, FunctionAnalysisManager &fam,
-           SmallVector<DataDependenceGraph *, 5> &SCCGraphs,
-           SmallVector<Loop *, 5> &loops,
-           SmallVector<std::string, 5> &dis_seqs);
+  // void run(Function &F, FunctionAnalysisManager &fam,
+  //          SmallVector<DataDependenceGraph *, 5> &SCCGraphs,
+  //          SmallVector<Loop *, 5> &loops,
+  //          SmallVector<std::string, 5> &dis_seqs);
 
   bool
   findLoopAndDistribute(Function &F, ScalarEvolution *SE_, LoopInfo *LI_,
@@ -117,9 +117,9 @@ class LoopDistributionWrapperPass : public FunctionPass {
   }*/
   bool runOnFunction(Function &F) override;
 
-  void run(SmallVector<DataDependenceGraph *, 5> &SCCGraphs,
-           SmallVector<Loop *, 5> &loops,
-           SmallVector<std::string, 5> &dis_seqs);
+  // void run(SmallVector<DataDependenceGraph *, 5> &SCCGraphs,
+  //          SmallVector<Loop *, 5> &loops,
+  //          SmallVector<std::string, 5> &dis_seqs);
 
   void getAnalysisUsage(AnalysisUsage &AU) const;
 };
diff --git a/llvm/include/llvm/Transforms/Scalar/IR2Vec-LOF/custom_loop_distribution.h b/llvm/include/llvm/Transforms/Scalar/IR2Vec-LOF/custom_loop_distribution.h
index 6f3ef6cab128..4337cdafcdbe 100644
--- a/llvm/include/llvm/Transforms/Scalar/IR2Vec-LOF/custom_loop_distribution.h
+++ b/llvm/include/llvm/Transforms/Scalar/IR2Vec-LOF/custom_loop_distribution.h
@@ -42,9 +42,11 @@ class custom_loop_distribution : public FunctionPass, public LDEnv {
 private:
   void canonicalizeLoopsWithLoads();
 
-  void initPipeCommunication(const std::vector<std::string> &RDG_List);
+  template <typename T>
+  void initCommunication(T &MLRunner,
+                         const std::vector<std::string> &RDG_List);
 
-  std::unique_ptr<MLModelRunner> MLRunner;
+  // std::unique_ptr<MLModelRunner> MLRunner;
   std::ofstream outfile;
 };
 
diff --git a/llvm/lib/Analysis/Analysis.cpp b/llvm/lib/Analysis/Analysis.cpp
index ad46888b9c66..341ba35115dc 100644
--- a/llvm/lib/Analysis/Analysis.cpp
+++ b/llvm/lib/Analysis/Analysis.cpp
@@ -85,8 +85,8 @@ void llvm::initializeAnalysis(PassRegistry &Registry) {
   initializeLCSSAVerificationPassPass(Registry);
   initializeMemorySSAWrapperPassPass(Registry);
   initializeMemorySSAPrinterLegacyPassPass(Registry);
-  initializeRDGWrapperPassPass(Registry);
-  initializeInnerMostLoopPassPass(Registry);
+  // initializeRDGWrapperPassPass(Registry);
+  // initializeInnerMostLoopPassPass(Registry);
 }
 
 void LLVMInitializeAnalysis(LLVMPassRegistryRef R) {
diff --git a/llvm/lib/CodeGen/MLRegAlloc/MLRegAlloc.cpp b/llvm/lib/CodeGen/MLRegAlloc/MLRegAlloc.cpp
index 80c555c435de..d89428a3674b 100644
--- a/llvm/lib/CodeGen/MLRegAlloc/MLRegAlloc.cpp
+++ b/llvm/lib/CodeGen/MLRegAlloc/MLRegAlloc.cpp
@@ -319,17 +319,12 @@ grpc::Status MLRA::codeGen(grpc::ServerContext *context,
   return Status::OK;
 }
 
-void MLRA::processMLInputsProtobuf(SmallSetVector<unsigned, 8> *updatedRegIdxs,
+template <typename T>
+void MLRA::processMLInputsProtobuf(T &MLRunner,
+                                   SmallSetVector<unsigned, 8> *updatedRegIdxs,
                                    bool IsStart) {
   // errs() << "Inside processMLInputsProtobuf\n";
 
-  /// testting
-  // MLRunner->setRequest(&ClientTestRequest);
-  // std::pair<std::string, std::string> strPair("s", "umesh");
-  // MLRunner->populateFeatures(strPair);
-  // return;
-  /// testing
-
   regIdxs.clear();
 
   if (!updatedRegIdxs) {
@@ -436,11 +431,13 @@ void MLRA::printFeatures() {
   }
 }
 
-void MLRA::processMLInputs(SmallSetVector<unsigned, 8> *updatedRegIdxs,
+template <typename T>
+void MLRA::processMLInputs(T &MLRunner,
+                           SmallSetVector<unsigned, 8> *updatedRegIdxs,
                            bool IsStart, bool IsJson) {
   // errs() << "Inside processMLInputs\n";
   if (data_format == "protobuf") {
-    processMLInputsProtobuf(updatedRegIdxs, IsStart);
+    processMLInputsProtobuf(MLRunner, updatedRegIdxs, IsStart);
     return;
   }
   regIdxs.clear();
@@ -2398,7 +2395,7 @@ void MLRA::training_flow() {
   LLVM_DEBUG(errs() << "Done MLRA allocation for : " << MF->getName() << '\n');
 }
 
-void MLRA::initPipeCommunication() {
+template <typename T> void MLRA::initCommunication(T &MLRunner) {
   auto processOutput = [&](std::vector<int> &reply) {
     if (reply[0] == 0) {
       PipeResponseData.Action = "Split";
@@ -2434,7 +2431,7 @@ void MLRA::initPipeCommunication() {
       if (count < MIN_VARS || count > MAX_VARS) {
         errs() << "regProf size is not between 120 and 500\n";
         return;
-      }  
+      }
 
       for (auto it = MF->begin(); it != MF->end(); it++) {
         if (it->isEHFuncletEntry() || it->isEHPad() || it->isEHScopeEntry() ||
@@ -2447,7 +2444,7 @@ void MLRA::initPipeCommunication() {
           }
         }
       }
-      processMLInputs(nullptr, true, IsJson);
+      processMLInputs(MLRunner, nullptr, true, IsJson);
       isGraphSet = true;
     } else {
       JO["new"] = false;
@@ -2456,7 +2453,7 @@ void MLRA::initPipeCommunication() {
     size_t size;
     int *out;
     errs() << "Calling evaluate...\n";
-    MLRunner->evaluate<int *>(out, size);
+    MLRunner->template evaluate<int *>(out, size);
     std::vector<int> reply(out, out + size);
     errs() << "Reply:: ";
     for (auto x : reply)
@@ -2511,9 +2508,9 @@ void MLRA::initPipeCommunication() {
         // errs() << "**********************************\n";
         if (PipeResponseData.Action == "Split") {
           errs() << "calling processMLInputs upon splitting...\n";
-          processMLInputs(&updatedRegIdxs, false, IsJson);
-        } else
-          processMLInputs(nullptr, false, IsJson);
+          processMLInputs(MLRunner, &updatedRegIdxs, false, IsJson);
+        } else
+          processMLInputs(MLRunner, nullptr, false, IsJson);
       } else {
         // errs() << "ENTERED ELSE CASE!!!!!!!!!\n";
         this->CommuResult = false;
@@ -2582,22 +2579,21 @@ void MLRA::inference() {
   //   return;
   // }
   if (usePipe) {
-    std::string basename = "/tmp/" + mlra_pipe_name;  
-    BaseSerDes::Kind SerDesType;
+    std::string basename = "/tmp/" + mlra_pipe_name;
+    SerDesKind SerDesType;
     if (data_format == "json") {
-      SerDesType = BaseSerDes::Kind::Json;
+      SerDesType = SerDesKind::Json;
     } else if (data_format == "bytes") {
-      SerDesType = BaseSerDes::Kind::Bitstream;
+      SerDesType = SerDesKind::Bitstream;
     } else if (data_format == "protobuf") {
-      SerDesType = BaseSerDes::Kind::Protobuf;
+      SerDesType = SerDesKind::Protobuf;
     } else {
       errs() << "Invalid data format\n";
       return;
     }
-    MLRunner = std::make_unique<PipeModelRunner>(basename + ".out",
-                                                 basename + ".in", SerDesType);
-    MLRunner->setResponse(&ServerModeRequest);
-    initPipeCommunication();
+    auto MLRunner = std::make_unique<PipeModelRunner>(
+        basename + ".out", basename + ".in", SerDesType);
+    initCommunication(MLRunner);
   } else {
     if (enable_mlra_training) {
       // MLRunner =
@@ -2622,7 +2618,7 @@ void MLRA::inference() {
       agentMap[COLOR_NODE_AGENT] = new Agent(nodeColouringModelPath);
       agentMap[SPLIT_NODE_AGENT] = new Agent(nodeSplitingModelPath);
 
-      MLRunner = std::make_unique<ONNXModelRunner>(this, agentMap);
+      auto MLRunner = std::make_unique<ONNXModelRunner>(this, agentMap);
 
       bool emptyGraph = true;
       int count = 0;
@@ -2690,7 +2686,7 @@ void MLRA::inference() {
       allocatePhysRegsViaRL();
       return;
     } else {
-      MLRunner = std::make_unique<gRPCModelRunner<
+      auto MLRunner = std::make_unique<gRPCModelRunner<
           registerallocationinference::RegisterAllocationInference,
           registerallocationinference::RegisterAllocationInference::Stub,
           registerallocationinference::RegisterProfileList,
@@ -2699,220 +2695,9 @@ void MLRA::inference() {
 
       MLRunner->setResponse(&ClientModeResponse);
 
-      initPipeCommunication();
+      initCommunication(MLRunner);
     }
   }
-  bool isGraphSet = false;
-  // while (true) {
-  //   reply = new registerallocationinference::Data();
-  //   grpc::ClientContext context;
-  //   LLVM_DEBUG(errs() << "Printing register profile:\n";
-  //              printRegisterProfile());
-  //   if (!isGraphSet) {
-  //     request = new registerallocationinference::RegisterProfileList();
-  //     serializeRegProfData(request);
-  //     LLVM_DEBUG(errs() << "Call model first time\n");
-  //     if (request->mutable_regprof()->size() < 120 ||
-  //         request->mutable_regprof()->size() > 500) {
-  //       ORE->emit([&]() {
-  //         return MachineOptimizationRemark(
-  //                    DEBUG_TYPE, "MLRA skipped Function ",
-  //                    MF->getFunction().front().front().getDebugLoc(),
-  //                    &MF->front())
-  //                << MF->getFunction().getParent()->getSourceFileName() <<
-  //                "\t"
-  //                << MF->getFunction().getName()
-  //                << "--> skipped by MLRA (nodes not in serviceable range)";
-  //       });
-  //       return;
-  //     }
-
-  //     for (auto it = MF->begin(); it != MF->end(); it++) {
-  //       if (it->isEHFuncletEntry() || it->isEHPad() || it->isEHScopeEntry()
-  //       ||
-  //           it->isEHScopeReturnBlock()) {
-  //         return;
-  //       }
-  //       for (auto ist = it->begin(); ist != it->end(); ist++) {
-  //         if (ist->isEHLabel() || ist->isEHScopeReturn()) {
-  //           return;
-  //         }
-  //       }
-  //     }
-
-  //     ORE->emit([&]() {
-  //       return MachineOptimizationRemark(
-  //                  DEBUG_TYPE, "MLRA Allocating Function ",
-  //                  MF->getFunction().front().front().getDebugLoc(),
-  //                  &MF->front())
-  //              << MF->getFunction().getParent()->getSourceFileName() << "\t"
-  //              << MF->getFunction().getName() << "--> Allocated by MLRA";
-  //     });
-  //     /*errs() << "Before calling model \n";
-  //     if (std::string s;
-  //     google::protobuf::TextFormat::PrintToString(*request, &s)) { std::cout
-  //     << "Your message: " << s; } else { std::cerr << "Message not valid
-  //     (partial content: "
-  //                                                                                               << request->ShortDebugString() << ")\n";
-  //                                                         }
-  //     errs() << "Before calling -- requetObj\n";
-  //     if (std::string s;
-  //     google::protobuf::TextFormat::PrintToString(*request, &s)) { std::cout
-  //     << "Your message: " << s; } else { std::cerr << "Message not valid
-  //     (partial content: "
-  //                                                                                               << request->ShortDebugString() << ")\n";
-  //                                                         }
-  //    */
-  //     // Stub->getInfo(&context, *request, reply);
-  //     isGraphSet = true;
-  //     (errs() << "Processing funtion: " << MF->getName() << "\n");
-  //   } else {
-  //     // sendRegProfData<registerallocationinference::RegisterProfileList>(
-  //     //    request);
-  //     request->set_new_(false);
-  //     LLVM_DEBUG(errs() << "Call model again\n");
-  //     // Stub->getInfo(&context, *request, reply);
-  //   }
-  //   assert(request->mutable_regprof()->size() <= 1000 &&
-  //          "Graph size is greater than the expected.\n");
-  //   LLVM_DEBUG(errs() << "Before calling model \n");
-  //   /*  if (std::string s;
-  //      google::protobuf::TextFormat::PrintToString(*request, &s)) { std::cout
-  //      << "Your message: " << s; } else { std::cerr << "Message not valid
-  //      (partial content: "
-  //                                                                                               << request->ShortDebugString() << ")\n";
-  //                                                         }
-  //    */
-  //   Status status = Stub->getInfo(&context, *request, reply);
-  //   LLVM_DEBUG(errs() << "Status : " << status.error_code() << ": "
-  //                     << status.error_message() << "\n");
-  //   assert(status.ok() && "status i not OK.");
-  //   LLVM_DEBUG(errs() << "After calling model \n");
-  //   /*if (std::string s; google::protobuf::TextFormat::PrintToString(*reply,
-  //      &s)) { std::cout << "Yo
-  //   */
-  //   assert(reply->message() != "" && "reply msg is empty");
-  //   LLVM_DEBUG(errs() << "Taken performed : " << reply->message() << " vreg "
-  //                     << std::to_string(reply->regidx()) << " "
-  //                     << std::to_string(reply->payload()) << "\n");
-  //   // std::string str = "LLVM\n";
-  //   assert(!(reply->message() == "Split" && reply->regidx() == 0 &&
-  //            reply->payload() == 0) &&
-  //          "Error in python side...");
-  //   // response->set_payload(str);
-  //   if (reply->message() == "Color") {
-  //     ORE->emit([&]() {
-  //       return MachineOptimizationRemark(
-  //                  DEBUG_TYPE, "#Registers colored by MLRA:Greedy ",
-  //                  MF->getFunction().front().front().getDebugLoc(),
-  //                  &MF->front())
-  //              << "#Registers colored by MLRA:Greedy :: "
-  //              << std::to_string(reply->color_size()) + ":" +
-  //                     std::to_string(numUnsupportedRegs);
-  //     });
-
-  //     std::string ucf = "";
-  //     for (auto i : unsupportedClsFreq) {
-  //       ucf += "\n " + i.first.str() + " - " + std::to_string(i.second);
-  //     }
-
-  //     ORE->emit([&]() {
-  //       return MachineOptimizationRemark(
-  //                  DEBUG_TYPE, "Freq of unsupported reg cls",
-  //                  MF->getFunction().front().front().getDebugLoc(),
-  //                  &MF->front())
-  //              << "Freq of unsupported reg cls:\n"
-  //              << ucf;
-  //     });
-
-  //     ORE->emit([&]() {
-  //       return MachineOptimizationRemark(
-  //                  DEBUG_TYPE, "#Splits",
-  //                  MF->getFunction().front().front().getDebugLoc(),
-  //                  &MF->front())
-  //              << "#Splits: " << std::to_string(numSplits);
-  //     });
-
-  //     if (reply->color_size() == 0) {
-  //       LLVM_DEBUG(errs() << "*****Warning -" << MF->getName()
-  //                         << " - Predictions not generated for the graph\n");
-  //       return;
-  //     }
-
-  //     std::map<std::string, int64_t> colorMap;
-  //     unsigned numSpills = 0;
-  //     for (auto i : reply->color()) {
-  //       colorMap[i.key()] = i.value();
-  //       errs() << "RegName: " << i.key() << " color: " << i.value() << "\n";
-  //       if (i.value() == 0)
-  //         numSpills++;
-  //     }
-
-  //     ORE->emit([&]() {
-  //       return MachineOptimizationRemark(
-  //                  DEBUG_TYPE, "#Spills",
-  //                  MF->getFunction().front().front().getDebugLoc(),
-  //                  &MF->front())
-  //              << "#Spills predicted by MLRA: " << std::to_string(numSpills)
-  //              << "#Regs allocated excluding spills by MLRA: "
-  //              << std::to_string(reply->color_size() - numSpills);
-  //     });
-
-  //     this->FunctionVirtRegToColorMap[MF->getName()] = colorMap;
-  //     // assert(reply->funcname() == MF->getName());
-  //     allocatePhysRegsViaRL();
-  //     errs() << "ALLOCATION DONE for " << MF->getName() << "\n";
-  //     errs() << "***************************************************\n";
-  //     LLVM_DEBUG(errs() << "The ML allocated virtual registers: /n";
-  //                for (auto i
-  //                     : mlAllocatedRegs) errs()
-  //                << printReg(i, TRI) << "\t";
-  //                errs() << "Done MLRA allocation for : " << MF->getName()
-  //                       << '\n');
-  //     return;
-  //   }
-  //   if (reply->message() == "Split" || reply->message() == "SplitAndCapture")
-  //   {
-  //     unsigned splitRegIdx = reply->regidx();
-  //     int splitPoint = reply->payload();
-  //     SmallVector<unsigned, 2> NewVRegs;
-  //     LLVM_DEBUG(errs() << "==========================BEFORE "
-  //                          "SPLITTING==================================\n";
-  //                MF->dump(); errs() <<
-  //                "====================================="
-  //                                      "=======================\n");
-
-  //     errs() << "**************STARTING SPLITTING********************\n";
-  //     errs() << "Splitting regidx: " << splitRegIdx << " at " << splitPoint
-  //            << "\n";
-  //     if (splitVirtReg(splitRegIdx, splitPoint, NewVRegs)) {
-  //       SmallSetVector<unsigned, 8> updatedRegIdxs;
-  //       updateRegisterProfileAfterSplit(splitRegIdx, NewVRegs,
-  //       updatedRegIdxs); if (enable_dump_ig_dot)
-  //         dumpInterferenceGraph(std::to_string(SplitCounter));
-  //       if (enable_mlra_checks)
-  //         verifyRegisterProfile();
-  //       errs() << "Splitting done\n";
-  //       errs() << "**********************************\n";
-
-  //       request = new registerallocationinference::RegisterProfileList();
-  //       if (reply->message() == "Split")
-  //         sendRegProfData<registerallocationinference::RegisterProfileList>(
-  //             request, &updatedRegIdxs);
-  //       else
-  //         sendRegProfData<registerallocationinference::RegisterProfileList>(
-  //             request);
-  //     } else {
-  //       LLVM_DEBUG(
-  //           errs()
-  //           << "Still after spliting prediction; LLVM dees not
-  //           performit.\n");
-  //       request->set_result(false);
-  //     }
-  //   }
-  //   if (reply->message() == "Exit")
-  //     return;
-  // }
 }
 
 void MLRA::MLRegAlloc(MachineFunction &MF, SlotIndexes &Indexes,
@@ -3021,7 +2806,7 @@ void MLRA::MLRegAlloc(MachineFunction &MF, SlotIndexes &Indexes,
     // RunService(this, mlra_server_address);
     // training_flow();
     //????
-    MLRunner = std::make_unique<gRPCModelRunner<
+    auto MLRunner = std::make_unique<gRPCModelRunner<
         registerallocationinference::RegisterAllocationInference::Service,
         registerallocationinference::RegisterAllocationInference::Stub,
         registerallocationinference::RegisterProfileList,
diff --git a/llvm/lib/Support/JSON.cpp b/llvm/lib/Support/JSON.cpp
index c672a43b033e..e19ef633e7f6 100644
--- a/llvm/lib/Support/JSON.cpp
+++ b/llvm/lib/Support/JSON.cpp
@@ -39,30 +39,30 @@ const Value *Object::get(StringRef K) const {
     return nullptr;
   return &I->second;
 }
-std::optional<std::nullptr_t> Object::getNull(StringRef K) const {
+Optional<std::nullptr_t> Object::getNull(StringRef K) const {
   if (auto *V = get(K))
     return V->getAsNull();
-  return std::nullopt;
+  return None;
 }
-std::optional<bool> Object::getBoolean(StringRef K) const {
+Optional<bool> Object::getBoolean(StringRef K) const {
   if (auto *V = get(K))
     return V->getAsBoolean();
-  return std::nullopt;
+  return None;
 }
-std::optional<double> Object::getNumber(StringRef K) const {
+Optional<double> Object::getNumber(StringRef K) const {
   if (auto *V = get(K))
     return V->getAsNumber();
-  return std::nullopt;
+  return None;
 }
-std::optional<int64_t> Object::getInteger(StringRef K) const {
+Optional<int64_t> Object::getInteger(StringRef K) const {
   if (auto *V = get(K))
     return V->getAsInteger();
-  return std::nullopt;
+  return None;
 }
-std::optional<llvm::StringRef> Object::getString(StringRef K) const {
+Optional<llvm::StringRef> Object::getString(StringRef K) const {
   if (auto *V = get(K))
     return V->getAsString();
-  return std::nullopt;
+  return None;
 }
 const json::Object *Object::getObject(StringRef K) const {
   if (auto *V = get(K))
@@ -411,7 +411,7 @@ class Parser {
            C == 'e' || C == 'E' || C == '+' || C == '-' || C == '.';
   }
 
-  std::optional<Error> Err;
+  Optional<Error> Err;
   const char *Start, *P, *End;
 };
 
diff --git a/llvm/lib/Transforms/Hello-MLBridge/Hello.cpp b/llvm/lib/Transforms/Hello-MLBridge/Hello.cpp
index 4999cd05b26b..d387f725c959 100644
--- a/llvm/lib/Transforms/Hello-MLBridge/Hello.cpp
+++ b/llvm/lib/Transforms/Hello-MLBridge/Hello.cpp
@@ -301,7 +301,7 @@ struct HelloMLBridge : public ModulePass,
 
     auto StartTime = std::chrono::high_resolution_clock::now();
 
-    MLRunner = std::make_unique<PipeModelRunner>(
+    auto MLRunner = std::make_unique<PipeModelRunner>(
         basename + ".out", basename + ".in", SerDesType, &M->getContext());
 
     std::pair<std::string, std::vector<float>> p1("tensor", FeatureVector);
@@ -325,24 +325,12 @@ struct HelloMLBridge : public ModulePass,
     }
   }
   
-  void setTFModelRunner(int n) {
-    switch (n) {
-#define M(x)                                                                   \
-  case x:                                                                      \
-    MLRunner = std::make_unique<TFModelRunner<LinearModel##x>>(("output"));     \
-    break;
-      MODELS(M)
-#undef M
-    }
-    // MLRunner = std::make_unique<TFModelRunner<LinearModel1000>>("output");
-  }
-
   void TFinitCommunication() {
     auto StartTime = std::chrono::high_resolution_clock::now();
 
     std::pair<std::string, std::vector<float>> p1("x", FeatureVector);
 
-    setTFModelRunner(n);
+    auto MLRunner = std::make_unique<TFModelRunner<LinearModel1000>>("output");
     MLRunner->populateFeatures(p1);
     double Out = MLRunner->evaluate<float>();
 
@@ -368,11 +356,11 @@ struct HelloMLBridge : public ModulePass,
     if (usePipe) {
       basename = "/tmp/" + pipe_name;
       if (data_format == "json")
-        SerDesType = BaseSerDes::Kind::Json;
+        SerDesType = SerDesKind::Json;
       else if (data_format == "protobuf")
-        SerDesType = BaseSerDes::Kind::Protobuf;
+        SerDesType = SerDesKind::Protobuf;
       else if (data_format == "bytes")
-        SerDesType = BaseSerDes::Kind::Bitstream;
+        SerDesType = SerDesKind::Bitstream;
       else {
         errs() << "Invalid data format\n";
         exit(1);
@@ -382,7 +370,7 @@ struct HelloMLBridge : public ModulePass,
     } else {
       if (training) {
         errs() << "Using 1st gRPC flow...\n";
-        MLRunner = std::make_unique<
+        auto MLRunner = std::make_unique<
             gRPCModelRunner<helloMLBridgegRPC::HelloMLBridgeService::Service,
                             helloMLBridgegRPC::HelloMLBridgeService::Stub,
                             helloMLBridgegRPC::TensorResponse,
@@ -395,7 +383,7 @@ struct HelloMLBridge : public ModulePass,
         std::map<std::string, Agent *> agents;
         agents["agent"] = agent;
         auto StartTime = std::chrono::high_resolution_clock::now();
-        MLRunner = std::make_unique<ONNXModelRunner>(this, agents,
+        auto MLRunner = std::make_unique<ONNXModelRunner>(this, agents,
                                                      &this->M->getContext());
         populateFeatureVector();
         int Out = MLRunner->evaluate<int>();
@@ -412,7 +400,7 @@ struct HelloMLBridge : public ModulePass,
 
         helloMLBridgegRPC::TensorResponse request;
         helloMLBridgegRPC::ActionRequest response;
-        MLRunner = std::make_unique<
+        auto MLRunner = std::make_unique<
             gRPCModelRunner<helloMLBridgegRPC::HelloMLBridgeService,
                             helloMLBridgegRPC::HelloMLBridgeService::Stub,
                             helloMLBridgegRPC::TensorResponse,
@@ -456,9 +444,8 @@ struct HelloMLBridge : public ModulePass,
   }
 
 private:
-  std::unique_ptr<MLModelRunner> MLRunner;
   std::string basename;
-  BaseSerDes::Kind SerDesType;
+  SerDesKind SerDesType;
   Module *M;
 };
 
diff --git a/llvm/lib/Transforms/IPO/PosetRL/CMakeLists.txt b/llvm/lib/Transforms/IPO/PosetRL/CMakeLists.txt
index 8a051d08343a..486ea3fe742a 100644
--- a/llvm/lib/Transforms/IPO/PosetRL/CMakeLists.txt
+++ b/llvm/lib/Transforms/IPO/PosetRL/CMakeLists.txt
@@ -1,8 +1,5 @@
 add_llvm_component_library(LLVMPosetRL posetRL.cpp
 
-LINK_COMPONENTS
-IR2Vec
-
 ADDITIONAL_HEADER_DIRS
 ${LLVM_MAIN_INCLUDE_DIR}/llvm/Transforms
 ${LLVM_MAIN_INCLUDE_DIR}/llvm/Transforms/PosetRL
@@ -10,5 +7,6 @@ ${LLVM_MAIN_INCLUDE_DIR}/llvm/Transforms/PosetRL
 DEPENDS
 intrinsics_gen
 LLVMMLBridge
+LLVMIR2Vec
 )
-target_link_libraries(LLVMPosetRL PUBLIC LLVMMLBridge)
+target_link_libraries(LLVMPosetRL PUBLIC LLVMMLBridge LLVMIR2Vec)
diff --git a/llvm/lib/Transforms/IPO/PosetRL/posetRL.cpp b/llvm/lib/Transforms/IPO/PosetRL/posetRL.cpp
index 8212aa4e3244..2bb56c66ba92 100644
--- a/llvm/lib/Transforms/IPO/PosetRL/posetRL.cpp
+++ b/llvm/lib/Transforms/IPO/PosetRL/posetRL.cpp
@@ -1,5 +1,18 @@
 #include "llvm/Transforms/IPO/PosetRL/PosetRL.h"
+#include "MLModelRunner/MLModelRunner.h"
+#include "MLModelRunner/ONNXModelRunner/ONNXModelRunner.h"
+#include "MLModelRunner/PipeModelRunner.h"
+#include "MLModelRunner/Utils/MLConfig.h"
+#include "MLModelRunner/gRPCModelRunner.h"
+#include "SerDes/baseSerDes.h"
+#include "SerDes/bitstreamSerDes.h"
+#include "SerDes/jsonSerDes.h"
+#include "SerDes/protobufSerDes.h"
+#include "grpc/posetRL/posetRL.grpc.pb.h"
+#include "grpc/posetRL/posetRL.pb.h"
+#include "grpcpp/impl/codegen/status.h"
 #include "inference/poset_rl_env.h"
+#include "llvm/ADT/StringRef.h"
 #include "llvm/IR/LegacyPassManager.h"
 #include "llvm/IR/Module.h"
 #include "llvm/IR2Vec.h"
@@ -15,17 +28,10 @@
 #include "llvm/Transforms/IPO/PassManagerBuilder.h"
 #include <cstdlib>
 #include <fstream>
-#include "grpc/posetRL/posetRL.grpc.pb.h"
-#include "grpc/posetRL/posetRL.pb.h"
 #include <google/protobuf/text_format.h>
+#include <memory>
 #include <utility>
 #include <vector>
-#include "MLModelRunner/MLModelRunner.h"
-#include "MLModelRunner/ONNXModelRunner/ONNXModelRunner.h"
-#include "MLModelRunner/PipeModelRunner.h"
-#include "MLModelRunner/gRPCModelRunner.h"
-#include "MLModelRunner/Utils/MLConfig.h"
-#include "grpcpp/impl/codegen/status.h"
 
 using namespace llvm;
 using namespace grpc;
@@ -54,8 +60,8 @@ static cl::opt<std::string> server_address(
     cl::init("127.0.0.1:50051"));
 
 static cl::opt<std::string> pipe_name("pipe-name", cl::Hidden,
-                               cl::init("posetrl_pipe"),
-                               cl::desc("Name for pipe file"));
+                                      cl::init("posetrl_pipe"),
+                                      cl::desc("Name for pipe file"));
 
 namespace {
 struct PosetRL : public ModulePass,
@@ -64,45 +70,40 @@ struct PosetRL : public ModulePass,
   static char ID;
   PosetRL() : ModulePass(ID) {}
   bool runOnModule(Module &M) override {
-    assert(MLConfig::mlconfig != "" && "ml-config-path required" );
+    assert(MLConfig::mlconfig != "" && "ml-config-path required");
     this->M = &M;
     // Establish pipe communication
     if (usePipe) {
       // data_format can take values: protobuf, json, bytes
-      std::string basename =
-          "/tmp/" + pipe_name;
-
-      BaseSerDes::Kind SerDesType;
-      if (data_format == "json")
-        SerDesType = BaseSerDes::Kind::Json;
-      else if (data_format == "protobuf")
-        SerDesType = BaseSerDes::Kind::Protobuf;
+      std::string basename = "/tmp/" + pipe_name;
+
+      SerDesKind SerDesType;
+      if (data_format == "json") {
+        SerDesType = SerDesKind::Json;
+      } else if (data_format == "protobuf")
+        SerDesType = SerDesKind::Protobuf;
       else if (data_format == "bytes")
-        SerDesType = BaseSerDes::Kind::Bitstream;
+        SerDesType = SerDesKind::Bitstream;
       else {
         errs() << "Invalid data format\n";
         exit(1);
       }
 
-      MLRunner = std::make_unique<PipeModelRunner>(
+      auto MLRunner = std::make_unique<PipeModelRunner>(
           basename + ".out", basename + ".in", SerDesType, &M.getContext());
-      posetRLgRPC::EmbeddingResponse response;
-      posetRLgRPC::ActionRequest request;
-      MLRunner->setRequest(&response);
-      MLRunner->setResponse(&request);
-      initPipeCommunication();
+      initCommunication(MLRunner);
+
     } else {
       if (training) {
-        MLRunner = std::make_unique<gRPCModelRunner<
+        auto MLRunner = std::make_unique<gRPCModelRunner<
             posetRLgRPC::PosetRLService::Service,
             posetRLgRPC::PosetRLService::Stub, posetRLgRPC::EmbeddingResponse,
             posetRLgRPC::ActionRequest>>(server_address, this, &M.getContext());
       } else if (useONNX) {
-        Agent agent(MLConfig::mlconfig +
-                    "/posetrl/posetrl_model.onnx");
+        Agent agent(MLConfig::mlconfig + "/posetrl/posetrl_model.onnx");
         std::map<std::string, Agent *> agents;
         agents["agent"] = &agent;
-        MLRunner =
+        auto MLRunner =
             std::make_unique<ONNXModelRunner>(this, agents, &M.getContext());
         MLRunner->evaluate<int>();
         errs() << "Sequence: ";
@@ -112,24 +113,26 @@ struct PosetRL : public ModulePass,
       } else {
         posetRLgRPC::EmbeddingResponse request;
         posetRLgRPC::ActionRequest response;
-        MLRunner = std::make_unique<gRPCModelRunner<
+        auto MLRunner = std::make_unique<gRPCModelRunner<
             posetRLgRPC::PosetRLService, posetRLgRPC::PosetRLService::Stub,
             posetRLgRPC::EmbeddingResponse, posetRLgRPC::ActionRequest>>(
             server_address, &request, &response, &M.getContext());
         MLRunner->setRequest(&request);
         MLRunner->setResponse(&response);
-        initPipeCommunication();
+        initCommunication(MLRunner);
       }
     }
     return true;
   }
-  void initPipeCommunication() {
+
+  template <typename T> void initCommunication(T &MLRunner) {
     int passSequence = 0;
     while (passSequence != -1) {
       std::pair<std::string, std::vector<float>> p1("embedding",
                                                     getEmbeddings());
       MLRunner->populateFeatures(p1);
-      int Res = MLRunner->evaluate<int>();
+
+      int Res = MLRunner->template evaluate<int>();
       processMLAdvice(Res);
       passSequence = Res;
       errs() << "Sequence : " << passSequence << "\t";
@@ -139,9 +142,9 @@ struct PosetRL : public ModulePass,
   inline void processMLAdvice(int advice) { applySeq(advice); }
 
   Embedding getEmbeddings() override {
-    auto Ir2vec =
-        IR2Vec::Embeddings(*M, IR2Vec::IR2VecMode::FlowAware,
-                           MLConfig::mlconfig + "/ir2vec/seedEmbeddingVocab-300-llvm10.txt");
+    auto Ir2vec = IR2Vec::Embeddings(
+        *M, IR2Vec::IR2VecMode::FlowAware,
+        MLConfig::mlconfig + "/ir2vec/seedEmbeddingVocab-300-llvm10.txt");
     auto ProgVector = Ir2vec.getProgramVector();
     Embedding Vector(ProgVector.begin(), ProgVector.end());
     return Vector;
@@ -163,14 +166,13 @@ struct PosetRL : public ModulePass,
     }
   }
 
-  grpc::Status
-  applyActionGetEmbeddings(grpc::ServerContext *context,
-                           const ::posetRLgRPC::ActionRequest *request,
-                           ::posetRLgRPC::EmbeddingResponse *response) override {
+  grpc::Status applyActionGetEmbeddings(
+      grpc::ServerContext *context, const ::posetRLgRPC::ActionRequest *request,
+      ::posetRLgRPC::EmbeddingResponse *response) override {
     // errs() << "Action requested: " << request->action() << "\n";
     if (request->action() == -1) {
       return grpc::Status::OK;
-    } 
+    }
     if (request->action() != 0)
       processMLAdvice(request->action());
 
@@ -183,8 +185,8 @@ struct PosetRL : public ModulePass,
 
   grpc::Status
   queryCompiler(grpc::ServerContext *context,
-                           const ::posetRLgRPC::ActionRequest *request,
-                           ::posetRLgRPC::EmbeddingResponse *response) {
+                const ::posetRLgRPC::ActionRequest *request,
+                ::posetRLgRPC::EmbeddingResponse *response) override {
     if (request->action() == -1) {
       return grpc::Status::OK;
     } else if (request->action() != 0)
@@ -199,7 +201,6 @@ struct PosetRL : public ModulePass,
 
 private:
   Module *M;
-  std::unique_ptr<MLModelRunner> MLRunner;
 };
 } // namespace
 char PosetRL::ID = 0;
diff --git a/llvm/lib/Transforms/Scalar/IR2Vec-LOF/IR2Vec-SCC/IR2Vec-SCC.cpp b/llvm/lib/Transforms/Scalar/IR2Vec-LOF/IR2Vec-SCC/IR2Vec-SCC.cpp
index 4555c84dbf68..c93ba1da634f 100644
--- a/llvm/lib/Transforms/Scalar/IR2Vec-LOF/IR2Vec-SCC/IR2Vec-SCC.cpp
+++ b/llvm/lib/Transforms/Scalar/IR2Vec-LOF/IR2Vec-SCC/IR2Vec-SCC.cpp
@@ -45,11 +45,12 @@ RDGWrapperPass::RDGWrapperPass() : FunctionPass(ID) {
 }
 
 char RDGWrapperPass::ID = 0;
-// RDGWrapperPass *llvm::createRDGWrapperPass() { return new RDGWrapperPass(); }
+RDGWrapperPass *llvm::createRDGWrapperPass() { return new RDGWrapperPass(); }
 
 // static RegisterPass<RDGWrapperPass> X("RDG", "Build ReducedDependenceGraph",
 // true, true);
 
+namespace {}
 void RDGWrapperPass::Print_IR2Vec_File(DataDependenceGraph &G,
                                        std::string Filename,
                                        std::string ll_name, int loopid) {
diff --git a/llvm/lib/Transforms/Scalar/IR2Vec-LOF/LoopDistribution/CMakeLists.txt b/llvm/lib/Transforms/Scalar/IR2Vec-LOF/LoopDistribution/CMakeLists.txt
index 2e64e949727d..f9275bb84835 100644
--- a/llvm/lib/Transforms/Scalar/IR2Vec-LOF/LoopDistribution/CMakeLists.txt
+++ b/llvm/lib/Transforms/Scalar/IR2Vec-LOF/LoopDistribution/CMakeLists.txt
@@ -1,2 +1,10 @@
-add_llvm_component_library(LLVMLoopDistribution STATIC  LoopDistribution.cpp ../RDG/RDG.cpp)
-add_dependencies(LLVMLoopDistribution intrinsics_gen)
+add_llvm_library(LLVMLoopDistribution LoopDistribution.cpp ../RDG/RDG.cpp
+
+ADDITIONAL_HEADER_DIRS
+${LLVM_MAIN_INCLUDE_DIR}/llvm/Transforms
+${LLVM_MAIN_INCLUDE_DIR}/llvm
+${LLVM_MAIN_INCLUDE_DIR}/llvm/Transforms/Scalar/IR2Vec-LOF
+
+DEPENDS
+intrinsics_gen
+)
diff --git a/llvm/lib/Transforms/Scalar/IR2Vec-LOF/LoopDistribution/LoopDistribution.cpp b/llvm/lib/Transforms/Scalar/IR2Vec-LOF/LoopDistribution/LoopDistribution.cpp
index 4f3e10e64035..7a7d58a0c202 100644
--- a/llvm/lib/Transforms/Scalar/IR2Vec-LOF/LoopDistribution/LoopDistribution.cpp
+++ b/llvm/lib/Transforms/Scalar/IR2Vec-LOF/LoopDistribution/LoopDistribution.cpp
@@ -12,7 +12,8 @@
 #include "llvm/Analysis/LoopAccessAnalysis.h"
 #include "llvm/Analysis/LoopAnalysisManager.h"
 #include "llvm/InitializePasses.h"
-#include "llvm/Passes/PassBuilder.h"
+// #include "llvm/Passes/PassBuilder.h"
+#include "llvm/Transforms/IPO/PassManagerBuilder.h"
 
 #define LDIST_NAME "ir2vec-loop-distribution"
 #define DEBUG_TYPE LDIST_NAME
@@ -522,110 +523,6 @@ bool LoopDistribution::computeDistributionOnLoop(DataDependenceGraph *SCCGraph,
   return distributed;
 }
 
-/**
- * To be checked, doubt regardinng the analysis function
- *
- */
-void LoopDistribution::computeDistribution(
-    SmallVector<DataDependenceGraph *, 5> &SCCGraphs,
-    SmallVector<Loop *, 5> &loops, SmallVector<std::string, 5> &dis_seqs) {
-
-  int size = loops.size();
-
-  for (int i = 0; i < size; i++) {
-    PassBuilder pb;
-    FunctionAnalysisManager fam;
-    pb.registerFunctionAnalyses(fam);
-    Function &F = *loops[i]->getHeader()->getParent();
-    AA = &fam.getResult<AAManager>(F);
-    SE = &fam.getResult<ScalarEvolutionAnalysis>(F);
-    LI = &fam.getResult<LoopAnalysis>(F);
-    DT = &fam.getResult<DominatorTreeAnalysis>(F);
-    ORE = &fam.getResult<OptimizationRemarkEmitterAnalysis>(F);
-    auto &AC = fam.getResult<AssumptionAnalysis>(F);
-    auto &TTI = fam.getResult<TargetIRAnalysis>(F);
-    auto &TLI = fam.getResult<TargetLibraryAnalysis>(F);
-
-    auto &LAM = fam.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
-    GetLAA = [&](Loop &L) -> const LoopAccessInfo & {
-      LoopStandardAnalysisResults AR = {*AA, AC,  *DT, *LI,
-                                        *SE, TLI, TTI, nullptr};
-      return LAM.getResult<LoopAccessAnalysis>(L, AR);
-    };
-
-    computeDistributionOnLoop(SCCGraphs[i], loops[i], dis_seqs[i]);
-  }
-}
-
-// void LoopDistribution::addTimer(Loop *first, Loop *last) {
-//   Loop *outerLoop = nullptr;
-//   Loop *tmpLoop = first;
-//   while (outerLoop == nullptr) {
-//     if (tmpLoop->getLoopDepth() == 1)
-//       outerLoop = tmpLoop;
-//     else
-//       tmpLoop = tmpLoop->getParentLoop();
-//   }
-//   assert(outerLoop);
-
-//   auto PH = outerLoop->getLoopPreheader();
-//   auto module = PH->getModule();
-//   auto context = &module->getContext();
-//   IRBuilder<> builder(*context);
-
-//   // Declare clock Function
-//   std::vector<Type *> clock_func_args;
-//   FunctionType *clock_func_type =
-//       FunctionType::get(IntegerType::get(*context, 64), clock_func_args, false);
-//   Function *clock_func = Function::Create(
-//       clock_func_type, GlobalValue::ExternalLinkage, "clock", module);
-//   clock_func->setCallingConv(CallingConv::C);
-
-//   // Declare printf Function
-//   PointerType *PointerTy_6 = PointerType::get(IntegerType::get(*context, 8), 0);
-//   std::vector<Type *> printf_func_args;
-//   printf_func_args.push_back(PointerTy_6);
-//   FunctionType *printf_func_type =
-//       FunctionType::get(IntegerType::get(*context, 32), printf_func_args, true);
-//   Function *printf_func = Function::Create(
-//       printf_func_type, GlobalValue::ExternalLinkage, "printf", module);
-//   printf_func->setCallingConv(CallingConv::C);
-
-//   CallInst *call_clock_t1 = CallInst::Create(
-//       PH->getModule()->getFunction("clock"), "", PH->getTerminator());
-//   call_clock_t1->setCallingConv(CallingConv::C);
-//   call_clock_t1->setTailCall(false);
-
-//   SmallVector<BasicBlock *, 4> exitBlks;
-//   if (last->getLoopDepth() == 1) {
-//     exitBlks.push_back(last->getExitBlock());
-//   } else {
-//     outerLoop->getExitBlocks(exitBlks);
-//   }
-
-//   for (auto blk : exitBlks) {
-
-//     auto firstInst = &blk->front();
-//     CallInst *call_clock_t2 =
-//         CallInst::Create(module->getFunction("clock"), "", firstInst);
-//     call_clock_t2->setCallingConv(CallingConv::C);
-//     call_clock_t2->setTailCall(false);
-//     BinaryOperator *sub = BinaryOperator::Create(
-//         Instruction::Sub, call_clock_t2, call_clock_t1, "", firstInst);
-
-//     builder.SetInsertPoint(sub->getNextNode());
-//     std::vector<Value *> printArgs;
-//     Value *formatStr = builder.CreateGlobalStringPtr(
-//         "Function - %s, Loop - %d, Time - %lld\n");
-//     Value *fnName = builder.CreateGlobalString(funcName);
-//     printArgs.push_back(formatStr);
-//     printArgs.push_back(fnName);
-//     printArgs.push_back(ConstantInt::get(*context, APInt(32, loopID)));
-//     printArgs.push_back(sub);
-//     builder.CreateCall(module->getFunction("printf"), printArgs);
-//   }
-// }
-
 Loop *LoopDistribution::findLoop(unsigned int lid) {
 
   // Build up a worklist of inner-loops to vectorize. This is necessary as the
@@ -753,80 +650,6 @@ bool LoopDistribution::runwithAnalysis(
   return isdis;
 }
 
-void LoopDistribution::run(Function &F, FunctionAnalysisManager &fam,
-                           SmallVector<DataDependenceGraph *, 5> &SCCGraphs,
-                           SmallVector<Loop *, 5> &loops,
-                           SmallVector<std::string, 5> &dis_seqs) {
-
-  int size = loops.size();
-  PassBuilder pb;
-  pb.registerFunctionAnalyses(fam);
-  for (int i = 0; i < size; i++) {
-    LLVM_DEBUG(errs() << i + 1 << "th iteration\n");
-    // Function &F = *loops[i]->getHeader()->getParent();
-    AA = &fam.getResult<AAManager>(F);
-    SE = &fam.getResult<ScalarEvolutionAnalysis>(F);
-    LI = &fam.getResult<LoopAnalysis>(F);
-    DT = &fam.getResult<DominatorTreeAnalysis>(F);
-    ORE = &fam.getResult<OptimizationRemarkEmitterAnalysis>(F);
-    auto &AC = fam.getResult<AssumptionAnalysis>(F);
-    auto &TTI = fam.getResult<TargetIRAnalysis>(F);
-    auto &TLI = fam.getResult<TargetLibraryAnalysis>(F);
-    LLVM_DEBUG(errs() << "Call to GETLAM...\n");
-
-    auto &LAM = fam.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
-
-    fam.registerPass([&] { return LoopAnalysisManagerFunctionProxy(LAM); });
-
-    LLVM_DEBUG(errs() << "Call to GETLAA...\n");
-    GetLAA = [&](Loop &L) -> const LoopAccessInfo & {
-      LoopStandardAnalysisResults AR = {*AA, AC,  *DT, *LI,
-                                        *SE, TLI, TTI, nullptr};
-      return LAM.getResult<LoopAccessAnalysis>(L, AR);
-    };
-
-    computeDistributionOnLoop(SCCGraphs[i], loops[i], dis_seqs[i]);
-  }
-}
-
-void LoopDistributionWrapperPass::run(
-    SmallVector<DataDependenceGraph *, 5> &SCCGraphs,
-    SmallVector<Loop *, 5> &loops, SmallVector<std::string, 5> &dis_seqs) {
-
-  int size = loops.size();
-
-  for (int i = 0; i < size; i++) {
-    PassBuilder pb;
-    FunctionAnalysisManager fam;
-    pb.registerFunctionAnalyses(fam);
-    Function &F = *loops[i]->getHeader()->getParent();
-    dist_helper.AA = &fam.getResult<AAManager>(F);
-    dist_helper.SE = &fam.getResult<ScalarEvolutionAnalysis>(F);
-    dist_helper.LI = &fam.getResult<LoopAnalysis>(F);
-    dist_helper.DT = &fam.getResult<DominatorTreeAnalysis>(F);
-    dist_helper.ORE = &fam.getResult<OptimizationRemarkEmitterAnalysis>(F);
-    auto &AC = fam.getResult<AssumptionAnalysis>(F);
-    auto &TTI = fam.getResult<TargetIRAnalysis>(F);
-    auto &TLI = fam.getResult<TargetLibraryAnalysis>(F);
-
-    auto &LAM = fam.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
-
-    dist_helper.GetLAA = [&](Loop &L) -> const LoopAccessInfo & {
-      LoopStandardAnalysisResults AR = {*(dist_helper.AA),
-                                        AC,
-                                        *(dist_helper.DT),
-                                        *(dist_helper.LI),
-                                        *(dist_helper.SE),
-                                        TLI,
-                                        TTI,
-                                        nullptr};
-      return LAM.getResult<LoopAccessAnalysis>(L, AR);
-    };
-
-    dist_helper.computeDistributionOnLoop(SCCGraphs[i], loops[i], dis_seqs[i]);
-  }
-}
-
 bool LoopDistributionWrapperPass::runOnFunction(Function &F) {
   auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
   auto SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
diff --git a/llvm/lib/Transforms/Scalar/IR2Vec-LOF/LoopDistributionServer/CMakeLists.txt b/llvm/lib/Transforms/Scalar/IR2Vec-LOF/LoopDistributionServer/CMakeLists.txt
index 464319b424ab..d8efd930b720 100644
--- a/llvm/lib/Transforms/Scalar/IR2Vec-LOF/LoopDistributionServer/CMakeLists.txt
+++ b/llvm/lib/Transforms/Scalar/IR2Vec-LOF/LoopDistributionServer/CMakeLists.txt
@@ -1,7 +1,9 @@
-add_llvm_library(LLVMLoopDistributionServer STATIC LoopDistributionServer.cpp
+add_llvm_library(LLVMLoopDistributionServer LoopDistributionServer.cpp
+
+ADDITIONAL_HEADER_DIRS
+${LLVM_MAIN_INCLUDE_DIR}/llvm/Transforms
+${LLVM_MAIN_INCLUDE_DIR}/llvm/Transforms/Scalar/IR2Vec-LOF
 
-LINK_COMPONENTS
-# gRPC
 DEPENDS
 intrinsics_gen
 LLVMMLBridge
diff --git a/llvm/lib/Transforms/Scalar/IR2Vec-LOF/LoopDistributionServer/LoopDistributionServer.cpp b/llvm/lib/Transforms/Scalar/IR2Vec-LOF/LoopDistributionServer/LoopDistributionServer.cpp
index 6ffd3f6523df..dcbb36845991 100644
--- a/llvm/lib/Transforms/Scalar/IR2Vec-LOF/LoopDistributionServer/LoopDistributionServer.cpp
+++ b/llvm/lib/Transforms/Scalar/IR2Vec-LOF/LoopDistributionServer/LoopDistributionServer.cpp
@@ -22,6 +22,8 @@
 #include <bits/stdint-intn.h>
 #include <cstdint>
 
+#define DEBUG_TYPE "ld-server"
+
 using namespace llvm;
 using namespace MLBridge;
 
@@ -162,17 +164,17 @@ struct LoopDistributionServerPass
     std::string pipe_name = "loopdistppipe";
     std::string basename = "/tmp/" + pipe_name; // change
 
-    BaseSerDes::Kind SerDesType;
+    SerDesKind SerDesType;
     if (data_format == "json") {
-      SerDesType = BaseSerDes::Kind::Json;
+      SerDesType = SerDesKind::Json;
     } else if (data_format == "bytes") {
-      SerDesType = BaseSerDes::Kind::Bitstream;
+      SerDesType = SerDesKind::Bitstream;
     } else if (data_format == "protobuf") {
-      SerDesType = BaseSerDes::Kind::Protobuf;
+      SerDesType = SerDesKind::Protobuf;
     } else {
       return;
     }
-    MLRunner = std::make_unique<PipeModelRunner>(
+    auto MLRunner = std::make_unique<PipeModelRunner>(
         basename + ".out", basename + ".in", SerDesType, &M->getContext());
     std::pair<std::string, long> p1("loopcost", (long)OriginalLoopCost);
     MLRunner->populateFeatures(p1);
@@ -244,7 +246,6 @@ struct LoopDistributionServerPass
   Module *M;
   uint64_t OriginalLoopCost;
   uint64_t DistributedLoopCost;
-  std::unique_ptr<MLModelRunner> MLRunner;
 };
 } // namespace
 char LoopDistributionServerPass::ID = 0;
diff --git a/llvm/lib/Transforms/Scalar/IR2Vec-LOF/custom_loop_distribution/custom_loop_distribution.cpp b/llvm/lib/Transforms/Scalar/IR2Vec-LOF/custom_loop_distribution/custom_loop_distribution.cpp
index 7b08f4dcdadb..3dc47200b1bb 100644
--- a/llvm/lib/Transforms/Scalar/IR2Vec-LOF/custom_loop_distribution/custom_loop_distribution.cpp
+++ b/llvm/lib/Transforms/Scalar/IR2Vec-LOF/custom_loop_distribution/custom_loop_distribution.cpp
@@ -90,8 +90,7 @@ void custom_loop_distribution::canonicalizeLoopsWithLoads() {
                 auto inst = dyn_cast<Instruction>(use);
                 if (inst && inst->getOpcode() != Instruction::Store &&
                     inst->getOpcode() != Instruction::PHI &&
-                    DT->dominates(st, inst))
-                {
+                    DT->dominates(st, inst)) {
                   SmallVector<Value *, 3> tuples;
                   tuples.push_back(src);
                   tuples.push_back(dest);
@@ -123,8 +122,9 @@ void custom_loop_distribution::canonicalizeLoopsWithLoads() {
   }
 }
 
-void custom_loop_distribution::initPipeCommunication(
-    const std::vector<std::string> &RDG_List) {
+template <typename T>
+void custom_loop_distribution::initCommunication(
+    T &MLRunner, const std::vector<std::string> &RDG_List) {
   int cnt = 1;
   for (auto rdg : RDG_List) {
     std::pair<std::string, std::string> p1("RDG", rdg);
@@ -132,7 +132,7 @@ void custom_loop_distribution::initPipeCommunication(
     errs() << "Features populated END...\n";
     int *out;
     size_t size;
-    MLRunner->evaluate<int *>(out, size);
+    MLRunner->template evaluate<int *>(out, size);
     errs() << "Func name: " << this->FName << " : " << cnt++ << "\n";
     std::vector<int> distSequence;
     for (int i = 0; i < size; i++) {
@@ -160,7 +160,7 @@ void custom_loop_distribution::initPipeCommunication(
   MLRunner->populateFeatures(p1);
   int *out;
   size_t size;
-  MLRunner->evaluate<int *>(out, size);
+  MLRunner->template evaluate<int *>(out, size);
   errs() << "Exit code: " << out[0] << "\n";
 }
 
@@ -174,7 +174,6 @@ bool custom_loop_distribution::runOnFunction(Function &F) {
   this->FName = F.getName();
   canonicalizeLoopsWithLoads();
 
-  
   SmallVector<DataDependenceGraph *, 5> SCCGraphs;
   SmallVector<Loop *, 5> loops;
 
@@ -199,19 +198,19 @@ bool custom_loop_distribution::runOnFunction(Function &F) {
   if (usePipe) {
     std::string basename = "/tmp/" + pipe_name;
 
-    BaseSerDes::Kind SerDesType;
+    SerDesKind SerDesType;
     if (data_format == "json") {
-      SerDesType = BaseSerDes::Kind::Json;
+      SerDesType = SerDesKind::Json;
     } else if (data_format == "bytes") {
-      SerDesType = BaseSerDes::Kind::Bitstream;
+      SerDesType = SerDesKind::Bitstream;
     } else if (data_format == "protobuf") {
-      SerDesType = BaseSerDes::Kind::Protobuf;
+      SerDesType = SerDesKind::Protobuf;
     } else {
       errs() << "Invalid data format\n";
       return false;
     }
 
-    MLRunner = std::make_unique<PipeModelRunner>(
+    auto MLRunner = std::make_unique<PipeModelRunner>(
         basename + ".out", basename + ".in", SerDesType, &M->getContext());
 
     outfile.open(data_format + "out.log", std::ios_base::app);
@@ -228,7 +227,7 @@ bool custom_loop_distribution::runOnFunction(Function &F) {
       return false;
     }
     (errs() << "Number rdg generated : " << RDG_List.size() << "\n");
-    initPipeCommunication(RDG_List);
+    initCommunication(MLRunner, RDG_List);
     outfile.close();
   } else if (useOnnx) {
 
@@ -253,7 +252,7 @@ bool custom_loop_distribution::runOnFunction(Function &F) {
       std::map<std::string, Agent *> agents;
       agents[SELECT_NODE_AGENT] = &node_selection_agent;
       agents[DISTRIBUTION_AGENT] = &distribution_agent;
-      MLRunner =
+      auto MLRunner =
           std::make_unique<ONNXModelRunner>(this, agents, &M->getContext());
       // runInference();
       MLRunner->evaluate<int64_t>();
@@ -265,11 +264,10 @@ bool custom_loop_distribution::runOnFunction(Function &F) {
   } else {
     loopdistribution::RDGData request;
     loopdistribution::Advice response;
-    MLRunner = std::make_unique<
+    auto MLRunner = std::make_unique<
         gRPCModelRunner<loopdistribution::LoopDistribution,
                         loopdistribution::LoopDistribution::Stub,
-                        loopdistribution::RDGData,
-                        loopdistribution::Advice>>(
+                        loopdistribution::RDGData, loopdistribution::Advice>>(
         server_address, &request, &response, &M->getContext());
     MLRunner->setRequest(&request);
     MLRunner->setResponse(&response);
@@ -277,7 +275,7 @@ bool custom_loop_distribution::runOnFunction(Function &F) {
     std::vector<std::string> RDG_List;
     RDG_List.insert(RDG_List.end(), data.input_rdgs_str.begin(),
                     data.input_rdgs_str.end());
-                    
+
     assert(RDG_List.size() == SCCGraphs.size() &&
            RDG_List.size() == loops.size() &&
            "RDG_List, SCCgraphs and loops list should of same size.");
@@ -287,7 +285,7 @@ bool custom_loop_distribution::runOnFunction(Function &F) {
       return false;
     }
     (errs() << "Number rdg generated : " << RDG_List.size() << "\n");
-    initPipeCommunication(RDG_List);
+    initCommunication(MLRunner, RDG_List);
   }
 
   LLVM_DEBUG(errs() << "Call to runwihAnalysis...\n");
diff --git a/mlir/lib/Transforms/HelloMLBridgePass.cpp b/mlir/lib/Transforms/HelloMLBridgePass.cpp
index 27da7916267e..af40faeee8b9 100644
--- a/mlir/lib/Transforms/HelloMLBridgePass.cpp
+++ b/mlir/lib/Transforms/HelloMLBridgePass.cpp
@@ -228,48 +228,45 @@
   M(49500)                                                                     \
   M(50000)
 
-static llvm::cl::opt<bool>
+using namespace mlir;
+using namespace grpc;
+using namespace MLBridge;
+using namespace helloMLBridgegRPC;
+
+namespace {
+llvm::cl::opt<bool>
     training("mlir-hello-training", llvm::cl::Hidden,
              llvm::cl::desc("whether it is training or inference"),
              llvm::cl::init(false));
-static llvm::cl::opt<std::string> server_address(
+llvm::cl::opt<std::string> server_address(
     "mlir-hello-server-address", llvm::cl::Hidden,
     llvm::cl::desc(
         "Starts the server in the given address, format <ip>:<port>"),
     llvm::cl::init("localhost:5050"));
 
-static llvm::cl::opt<std::string> data_format(
+llvm::cl::opt<std::string> data_format(
     "mlir-hello-data-format", llvm::cl::Hidden, llvm::cl::init("json"),
     llvm::cl::desc("Data format to use for communication with python model"));
 
-static llvm::cl::opt<bool>
-    useONNX("mlir-hello-use-onnx", llvm::cl::Hidden,
-            llvm::cl::desc("Use ONNX for inferencing model"),
-            llvm::cl::init(false));
+llvm::cl::opt<bool> useONNX("mlir-hello-use-onnx", llvm::cl::Hidden,
+                            llvm::cl::desc("Use ONNX for inferencing model"),
+                            llvm::cl::init(false));
 
-static llvm::cl::opt<bool>
+llvm::cl::opt<bool>
     usePipe("mlir-hello-use-pipe", llvm::cl::Hidden,
             llvm::cl::desc("Use pipe based interation with python model"),
             llvm::cl::init(false));
 
-static llvm::cl::opt<std::string>
-    pipe_name("mlir-hello-pipe-name", llvm::cl::Hidden, llvm::cl::init("dummy"),
-              llvm::cl::desc("Name for pipe file"));
-static llvm::cl::opt<int> n("mlir-hello-data-size", llvm::cl::Hidden,
-                            llvm::cl::init(1000),
-                            llvm::cl::desc("Size of input vector"));
-
-static llvm::cl::opt<bool>
-    useTF("mlir-hello-use-tf", llvm::cl::Hidden,
-          llvm::cl::desc("Use TF AOT for inferencing model"),
-          llvm::cl::init(false));
-
-using namespace mlir;
-using namespace grpc;
-using namespace MLBridge;
-using namespace helloMLBridgegRPC;
+llvm::cl::opt<std::string> pipe_name("mlir-hello-pipe-name", llvm::cl::Hidden,
+                                     llvm::cl::init("dummy"),
+                                     llvm::cl::desc("Name for pipe file"));
+llvm::cl::opt<int> n("mlir-hello-data-size", llvm::cl::Hidden,
+                     llvm::cl::init(1000),
+                     llvm::cl::desc("Size of input vector"));
 
-namespace {
+llvm::cl::opt<bool> useTF("mlir-hello-use-tf", llvm::cl::Hidden,
+                          llvm::cl::desc("Use TF AOT for inferencing model"),
+                          llvm::cl::init(false));
 
 std::random_device rd;
 std::mt19937 gen(5);
@@ -329,24 +326,12 @@ struct MLIRHelloMLBridge : public OperationPass<MLIRHelloMLBridge>,
     }
   };
 
-  void setTFModelRunner(int n) {
-    switch (n) {
-#define M(x)                                                                   \
-  case x:                                                                      \
-    MLRunner = new TFModelRunner<LinearModel##x>("output");                    \
-    break;
-      MODELS(M)
-#undef M
-    }
-    // MLRunner = new TFModelRunner<LinearModel1000>("output");
-  }
-
   void TFinitCommunication() {
     auto StartTime = std::chrono::high_resolution_clock::now();
 
     std::pair<std::string, std::vector<float>> p1("x", FeatureVector);
 
-    setTFModelRunner(n);
+    auto MLRunner = new TFModelRunner<LinearModel1000>("output");
     MLRunner->populateFeatures(p1);
     double Out = MLRunner->evaluate<float>();
 
@@ -378,7 +363,7 @@ struct MLIRHelloMLBridge : public OperationPass<MLIRHelloMLBridge>,
     } else {
       if (training) {
         HelloMLIRTraining *gRPCTrainer = new HelloMLIRTraining();
-        MLRunner = new gRPCModelRunner<
+        auto MLRunner = new gRPCModelRunner<
             helloMLBridgegRPC::HelloMLBridgeService::Service,
             helloMLBridgegRPC::HelloMLBridgeService::Stub,
             helloMLBridgegRPC::TensorResponse,
@@ -393,7 +378,7 @@ struct MLIRHelloMLBridge : public OperationPass<MLIRHelloMLBridge>,
         agents["agent"] = agent;
         auto StartTime = std::chrono::high_resolution_clock::now();
         Env = new HelloMLBridgeEnv();
-        MLRunner = new ONNXModelRunner(this, agents, nullptr);
+        auto MLRunner = new ONNXModelRunner(this, agents, nullptr);
         populateFeatureVector(FeatureVector);
         int Out = MLRunner->evaluate<int>();
         auto EndTime = std::chrono::high_resolution_clock::now();
@@ -409,7 +394,7 @@ struct MLIRHelloMLBridge : public OperationPass<MLIRHelloMLBridge>,
 
         helloMLBridgegRPC::TensorResponse request;
         helloMLBridgegRPC::ActionRequest response;
-        MLRunner =
+        auto MLRunner =
             new gRPCModelRunner<helloMLBridgegRPC::HelloMLBridgeService,
                                 helloMLBridgegRPC::HelloMLBridgeService::Stub,
                                 helloMLBridgegRPC::TensorResponse,
@@ -435,10 +420,9 @@ struct MLIRHelloMLBridge : public OperationPass<MLIRHelloMLBridge>,
   }
 
 private:
-  BaseSerDes::Kind SerDesType;
+  SerDesKind SerDesType;
   HelloMLBridgeEnv *Env;
   std::string basename = "/tmp/" + pipe_name;
-  MLModelRunner *MLRunner;
   static void populateFeatureVector(std::vector<float> &FeatureVector);
   void initCommunication();
   void setModelRunner(int n);
@@ -446,13 +430,13 @@ struct MLIRHelloMLBridge : public OperationPass<MLIRHelloMLBridge>,
 
 void MLIRHelloMLBridge::initCommunication() {
   if (data_format == "bytes") {
-    SerDesType = BaseSerDes::Kind::Bitstream;
+    SerDesType = SerDesKind::Bitstream;
   } else if (data_format == "json") {
-    SerDesType = BaseSerDes::Kind::Json;
+    SerDesType = SerDesKind::Json;
   }
   basename = "/tmp/" + pipe_name;
   auto StartTime = std::chrono::high_resolution_clock::now();
-  MLRunner =
+  auto MLRunner =
       new PipeModelRunner(basename + ".out", basename + ".in", SerDesType);
 
   std::pair<std::string, std::vector<float>> p1("tensor", FeatureVector);
@@ -479,8 +463,6 @@ void MLIRHelloMLBridge::populateFeatureVector(
   }
 }
 
-void MLIRHelloMLBridge::setModelRunner(int n) { MLRunner = nullptr; }
-
 } // end anonymous namespace
 
 std::unique_ptr<mlir::Pass> mlir::createMLIRHelloMLBridgePass() {