subDesTagesMitExtraKaese 4 years ago
parent
commit
8444d40dfc

+ 3 - 0
.gitmodules

@@ -0,0 +1,3 @@
+[submodule "lib/json"]
+	path = lib/json
+	url = https://github.com/nlohmann/json

+ 3 - 0
README.md

@@ -13,3 +13,6 @@
 
 [Analyse und Erweiterung](docs/analyse.md) des TF-Frameworks
 -------
+
+
+python3 -m pip install tensorflow

+ 16 - 0
config.json

@@ -0,0 +1,16 @@
+{
+  "fpgas": [
+    {
+      "ip":   "192.168.1.33",
+      "port": 1234
+    },
+    {
+      "ip":   "192.168.1.35",
+      "port": 1234
+    },
+    {
+      "ip":   "192.168.1.36",
+      "port": 1234
+    }
+  ]
+}
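
The three endpoints above are what the new init() in src/entrypoint.cpp (further down in this commit) feeds into connectionManager.addFPGA(). As a standalone sketch of the same parsing with the vendored nlohmann/json header — the file name, the "fpgas", "ip" and "port" keys, and the include path are taken from this commit, while the missing-file check is an addition for illustration only:

#include <cstdio>
#include <fstream>
#include <string>
#include "lib/json/single_include/nlohmann/json.hpp"  // vendored submodule, path relative to the repo root

int main() {
  std::ifstream configStream("config.json");
  if (!configStream.is_open()) {                 // not handled in the commit; added here for illustration
    std::fprintf(stderr, "config.json not found\n");
    return 1;
  }

  nlohmann::json config;
  configStream >> config;                        // parse the whole document

  for (const auto& fpga : config["fpgas"]) {
    const std::string ip = fpga["ip"];           // e.g. "192.168.1.33"
    const unsigned port  = fpga["port"];         // e.g. 1234
    std::printf("would add fpga at %s:%u\n", ip.c_str(), port);
  }
  return 0;
}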

+ 2 - 0
include/conv2D.hpp

@@ -16,6 +16,7 @@
 #include "../lib/mlfpga/include/modules.hpp"
 
 #include "entrypoint.hpp"
+#include "helper.hpp"
 
 namespace tf_lib {
 
@@ -23,6 +24,7 @@ namespace tf_lib {
   using namespace std::chrono;
   typedef FunctionDefHelper FDH;
 
+  extern ShapeFunction conv2d_shape_fn;
 
   class Conv2DOp : public AsyncOpKernel {
     public:

+ 7 - 1
include/entrypoint.hpp

@@ -1,6 +1,9 @@
 #ifndef ENTRY_FPGA_H
 #define ENTRY_FPGA_H
 
+#include <fstream>
+#include "../lib/json/single_include/nlohmann/json.hpp"
+
 #include "tensorflow/core/framework/op.h"
 #include "tensorflow/core/framework/shape_inference.h"
 #include "tensorflow/core/framework/function.h"
@@ -13,9 +16,12 @@
 #include "dummyBigOp.hpp"
 #include "../lib/mlfpga/include/connectionManager.hpp"
 
+#include "helper.hpp"
+
 namespace tf_lib {
-  void __attribute__ ((constructor)) init(void);
+  void __attribute__ ((constructor)) construct(void);
 
   extern ConnectionManager connectionManager;
+  void init();
 }
 #endif
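
The header now separates the two lifecycle steps: construct() keeps the GCC/Clang constructor attribute and runs once when TensorFlow loads the shared library, while init() is an ordinary function the kernels call lazily (see src/entrypoint.cpp below). A minimal self-contained illustration of that load-time hook, independent of this repository:

#include <cstdio>

// Runs automatically when the shared object containing it is loaded,
// before any operator from the library is executed.
static void __attribute__((constructor)) on_load(void) {
  std::printf("library loaded\n");
}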

+ 31 - 0
include/helper.hpp

@@ -0,0 +1,31 @@
+
+#ifndef HELPER_FPGA_H
+#define HELPER_FPGA_H 
+
+#include "tensorflow/core/framework/op.h"
+#include "tensorflow/core/framework/shape_inference.h"
+#include "tensorflow/core/framework/function.h"
+#include "tensorflow/core/util/tensor_format.h"
+
+#include "tensorflow/core/lib/math/math_util.h"
+
+namespace tf_lib {
+
+
+  using namespace tensorflow;
+  using namespace tensorflow::shape_inference;
+  
+  typedef Status (*ShapeFunction)(InferenceContext*);
+
+  Status DimensionsFromShape(ShapeHandle shape, TensorFormat format,
+                            DimensionHandle* batch_dim,
+                            gtl::MutableArraySlice<DimensionHandle> spatial_dims,
+                            DimensionHandle* filter_dim,
+                            InferenceContext* context);
+
+  Status ShapeFromDimensions(DimensionHandle batch_dim,
+                            gtl::ArraySlice<DimensionHandle> spatial_dims,
+                            DimensionHandle filter_dim, TensorFormat format,
+                            InferenceContext* context, ShapeHandle* shape);
+}
+#endif

+ 0 - 3
install.sh

@@ -1,3 +0,0 @@
-#!/bin/bash
-
-python3 -m pip install tensorflow

+ 1 - 0
lib/json

@@ -0,0 +1 @@
+Subproject commit dd7e25927fe7a49c81d07943c32444f0a9011665

+ 0 - 2
lib/mlfpga/include/connectionManager.hpp

@@ -37,8 +37,6 @@ class ConnectionManager {
     ConnectionManager();
     ~ConnectionManager();
 
-    void startFromTensorflow();
-
     void addFPGA(const char* ip, const uint port, bool bindSelf=false);
     void start();
 

+ 0 - 13
lib/mlfpga/src/connectionManager.cpp

@@ -30,19 +30,6 @@ void ConnectionManager::removeFinishedWorkers() {
   );
 }
 
-void ConnectionManager::startFromTensorflow() {
-  if(isRunning())
-    return;
-
-  addFPGA("192.168.1.33", 1234);
-  addFPGA("192.168.1.34", 1234);
-  addFPGA("192.168.1.35", 1234);
-
-  start();
-
-  printf("fpga server started\n");
-}
-
 void ConnectionManager::start() {
   running = true;
   sendResult = std::async(std::launch::async, &ConnectionManager::sendThread, this);


+ 51 - 1
src/conv2D.cpp

@@ -7,12 +7,62 @@ namespace tf_lib {
   volatile int inParallel = 0;
   std::mutex printMu;
 
+  ShapeFunction conv2d_shape_fn = [](InferenceContext* c) {
+    //INPUT: NHWC
+    //KERNEL: HWIO
+    //OUTPUT: NHWC
+
+    constexpr int num_spatial_dims = 2;
+    TensorFormat data_format;
+    FormatFromString("NHWC", &data_format);
+    FilterTensorFormat filter_format;
+    FilterFormatFromString("HWIO", &filter_format);
+
+    ShapeHandle input_shape, filter_shape, output_shape;
+    TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input_shape));
+    TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 4, &filter_shape));
+
+    DimensionHandle batch_size_dim;
+    DimensionHandle input_depth_dim;
+    gtl::InlinedVector<DimensionHandle, 2> input_spatial_dims(2);
+    TF_RETURN_IF_ERROR(DimensionsFromShape(
+      input_shape, data_format, &batch_size_dim,
+      absl::MakeSpan(input_spatial_dims), &input_depth_dim, c));
+
+    DimensionHandle output_depth_dim = c->Dim(
+      filter_shape, GetFilterDimIndex<num_spatial_dims>(filter_format, 'O'));
+      /*
+    DimensionHandle filter_rows_dim = c->Dim(
+      filter_shape, GetFilterDimIndex<num_spatial_dims>(filter_format, 'H'));
+    DimensionHandle filter_cols_dim = c->Dim(
+      filter_shape, GetFilterDimIndex<num_spatial_dims>(filter_format, 'W'));
+      */
+    DimensionHandle filter_input_depth_dim = c->Dim(
+      filter_shape, GetFilterDimIndex<num_spatial_dims>(filter_format, 'I'));
+
+    DimensionHandle output_rows, output_cols, output_channels;
+    c->Add(input_spatial_dims[0], 0, &output_rows);
+    c->Add(input_spatial_dims[1], 0, &output_cols);
+
+    c->Multiply(filter_input_depth_dim, output_depth_dim, &output_channels);
+
+    std::vector<DimensionHandle> out_dims(4);
+    out_dims[0] = batch_size_dim;
+    out_dims[1] = output_rows;
+    out_dims[2] = output_cols;
+    out_dims[3] = output_channels;
+
+    output_shape = c->MakeShape(out_dims);
+    c->set_output(0, output_shape);
+    return Status::OK();
+  };
+
   Conv2DOp::Conv2DOp(OpKernelConstruction* context) : AsyncOpKernel(context) {
     instance = instances++;
   };
 
   void Conv2DOp::ComputeAsync(OpKernelContext* context, DoneCallback done) {
-    connectionManager.startFromTensorflow();
+    init();
     // Input tensor is of the following dimensions:
     // [ batch, in_rows, in_cols, in_depth ]
     const Tensor& input = context->input(0);
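
To make the shape inference above concrete (example values are illustrative, not from the commit): with an NHWC input of shape [1, 28, 28, 3] and an HWIO filter of shape [5, 5, 3, 8], the function copies the spatial dims unchanged (the Add with 0 is an identity) and multiplies the filter's input depth by its output depth, so the inferred output shape is [1, 28, 28, 3 * 8] = [1, 28, 28, 24].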

+ 1 - 1
src/dummyBigOp.cpp

@@ -7,7 +7,7 @@ namespace tf_lib {
   };
 
   void DummyBigOp::ComputeAsync(OpKernelContext* context, DoneCallback done) {
-    connectionManager.startFromTensorflow();
+    init();
     // Input tensor is of the following dimensions:
     // [ batch, in_rows, in_cols, in_depth ]
     const Tensor& input = context->input(0);

+ 1 - 1
src/dummyOp.cpp

@@ -7,7 +7,7 @@ namespace tf_lib {
   };
 
   void DummyOp::ComputeAsync(OpKernelContext* context, DoneCallback done) {
-    connectionManager.startFromTensorflow();
+    init();
     // Input tensor is of the following dimensions:
     // [ batch, in_rows, in_cols, in_depth ]
     const Tensor& input = context->input(0);

+ 27 - 105
src/entrypoint.cpp

@@ -6,114 +6,11 @@ namespace tf_lib {
   using namespace tensorflow;
   using namespace tensorflow::shape_inference;
 
-
-  Status DimensionsFromShape(ShapeHandle shape, TensorFormat format,
-                            DimensionHandle* batch_dim,
-                            gtl::MutableArraySlice<DimensionHandle> spatial_dims,
-                            DimensionHandle* filter_dim,
-                            InferenceContext* context) {
-    const int32 rank = GetTensorDimsFromSpatialDims(spatial_dims.size(), format);
-    // Batch.
-    *batch_dim = context->Dim(shape, GetTensorBatchDimIndex(rank, format));
-    // Spatial.
-    for (int spatial_dim_index = 0; spatial_dim_index < spatial_dims.size();
-        ++spatial_dim_index) {
-      spatial_dims[spatial_dim_index] = context->Dim(
-          shape, GetTensorSpatialDimIndex(rank, format, spatial_dim_index));
-    }
-    // Channel.
-    *filter_dim = context->Dim(shape, GetTensorFeatureDimIndex(rank, format));
-    if (format == FORMAT_NCHW_VECT_C) {
-      TF_RETURN_IF_ERROR(context->Multiply(
-          *filter_dim,
-          context->Dim(shape, GetTensorInnerFeatureDimIndex(rank, format)),
-          filter_dim));
-    }
-    return Status::OK();
-  }
-
-  Status ShapeFromDimensions(DimensionHandle batch_dim,
-                            gtl::ArraySlice<DimensionHandle> spatial_dims,
-                            DimensionHandle filter_dim, TensorFormat format,
-                            InferenceContext* context, ShapeHandle* shape) {
-    const int32 rank = GetTensorDimsFromSpatialDims(spatial_dims.size(), format);
-    std::vector<DimensionHandle> out_dims(rank);
-
-    // Batch.
-    out_dims[tensorflow::GetTensorBatchDimIndex(rank, format)] = batch_dim;
-    // Spatial.
-    for (int spatial_dim_index = 0; spatial_dim_index < spatial_dims.size();
-        ++spatial_dim_index) {
-      out_dims[tensorflow::GetTensorSpatialDimIndex(
-          rank, format, spatial_dim_index)] = spatial_dims[spatial_dim_index];
-    }
-    // Channel.
-    if (format == tensorflow::FORMAT_NCHW_VECT_C) {
-      // When format is NCHW_VECT_C, factor the feature map count
-      // into the outer feature count and the inner feature count (=4).
-      TF_RETURN_IF_ERROR(context->Divide(
-          filter_dim, 4, /*evenly_divisible=*/true,
-          &out_dims[tensorflow::GetTensorFeatureDimIndex(rank, format)]));
-      out_dims[GetTensorInnerFeatureDimIndex(rank, format)] = context->MakeDim(4);
-    } else {
-      out_dims[tensorflow::GetTensorFeatureDimIndex(rank, format)] = filter_dim;
-    }
-
-    *shape = context->MakeShape(out_dims);
-    return tensorflow::Status::OK();
-  }
-
   REGISTER_OP("MyConv2D")
       .Input("input: float")
       .Input("filter: float")
       .Output("output: float")
-      .SetShapeFn([](InferenceContext* c) {
-        //INPUT: NHWC
-        //KERNEL: HWIO
-        //OUTPUT: NHWC
-
-        constexpr int num_spatial_dims = 2;
-        TensorFormat data_format;
-        FormatFromString("NHWC", &data_format);
-        FilterTensorFormat filter_format;
-        FilterFormatFromString("HWIO", &filter_format);
-
-        ShapeHandle input_shape, filter_shape, output_shape;
-        TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input_shape));
-        TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 4, &filter_shape));
-
-        DimensionHandle batch_size_dim;
-        DimensionHandle input_depth_dim;
-        gtl::InlinedVector<DimensionHandle, 2> input_spatial_dims(2);
-        TF_RETURN_IF_ERROR(DimensionsFromShape(
-          input_shape, data_format, &batch_size_dim,
-          absl::MakeSpan(input_spatial_dims), &input_depth_dim, c));
-
-        DimensionHandle output_depth_dim = c->Dim(
-          filter_shape, GetFilterDimIndex<num_spatial_dims>(filter_format, 'O'));
-        DimensionHandle filter_rows_dim = c->Dim(
-          filter_shape, GetFilterDimIndex<num_spatial_dims>(filter_format, 'H'));
-        DimensionHandle filter_cols_dim = c->Dim(
-          filter_shape, GetFilterDimIndex<num_spatial_dims>(filter_format, 'W'));
-        DimensionHandle filter_input_depth_dim = c->Dim(
-          filter_shape, GetFilterDimIndex<num_spatial_dims>(filter_format, 'I'));
-
-        DimensionHandle output_rows, output_cols, output_channels;
-        c->Add(input_spatial_dims[0], 0, &output_rows);
-        c->Add(input_spatial_dims[1], 0, &output_cols);
-
-        c->Multiply(filter_input_depth_dim, output_depth_dim, &output_channels);
-
-        std::vector<DimensionHandle> out_dims(4);
-        out_dims[0] = batch_size_dim;
-        out_dims[1] = output_rows;
-        out_dims[2] = output_cols;
-        out_dims[3] = output_channels;
-
-        output_shape = c->MakeShape(out_dims);
-        c->set_output(0, output_shape);
-        return Status::OK();
-      });
+      .SetShapeFn(conv2d_shape_fn);
 
   REGISTER_KERNEL_BUILDER(Name("MyConv2D").Device(DEVICE_CPU), Conv2DOp);
 
@@ -141,7 +38,32 @@ namespace tf_lib {
 
   ConnectionManager connectionManager;
 
-  void __attribute__ ((constructor)) init(void) {
+  bool hasInitialized = false;
+
+  void init() {
+    if(hasInitialized)
+      return;
+
+    std::ifstream configStream("config.json");
+    nlohmann::json config;
+    configStream >> config;
+
+    auto fpgas = config["fpgas"];
+
+    for(uint i=0; i<fpgas.size(); i++) {
+      string ip = fpgas[i]["ip"];
+      const uint port = fpgas[i]["port"];
+      connectionManager.addFPGA(ip.c_str(), port);
+      printf("added fpga %u at %s:%u\n", i, ip.c_str(), port);
+    }
+
+    connectionManager.start();
+
+    printf("fpga server started\n");
+    hasInitialized = true;
+  }
+
+  void __attribute__ ((constructor)) construct(void) {
     printf("fpga library loaded\n");
   }
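
init() above is guarded by a plain hasInitialized bool, which covers a single first launch but not several ComputeAsync calls racing on it. A self-contained sketch (not part of this commit) of the same lazy start expressed with std::call_once; the setup body is a stand-in for the config parsing and connectionManager.start() calls shown in the diff:

#include <cstdio>
#include <mutex>

namespace tf_lib_sketch {

  std::once_flag init_flag;

  void do_setup() {
    // stand-in for: read config.json, addFPGA() per entry, start()
    std::printf("fpga connections started\n");
  }

  void init() {
    std::call_once(init_flag, do_setup);  // runs do_setup exactly once, even with concurrent callers
  }
}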
 

+ 65 - 0
src/helper.cpp

@@ -0,0 +1,65 @@
+#include "helper.hpp"
+
+namespace tf_lib {
+
+  using namespace tensorflow;
+  using namespace tensorflow::shape_inference;
+
+
+  Status DimensionsFromShape(ShapeHandle shape, TensorFormat format,
+                            DimensionHandle* batch_dim,
+                            gtl::MutableArraySlice<DimensionHandle> spatial_dims,
+                            DimensionHandle* filter_dim,
+                            InferenceContext* context) {
+    const int32 rank = GetTensorDimsFromSpatialDims(spatial_dims.size(), format);
+    // Batch.
+    *batch_dim = context->Dim(shape, GetTensorBatchDimIndex(rank, format));
+    // Spatial.
+    for (uint spatial_dim_index = 0; spatial_dim_index < spatial_dims.size();
+        ++spatial_dim_index) {
+      spatial_dims[spatial_dim_index] = context->Dim(
+          shape, GetTensorSpatialDimIndex(rank, format, spatial_dim_index));
+    }
+    // Channel.
+    *filter_dim = context->Dim(shape, GetTensorFeatureDimIndex(rank, format));
+    if (format == FORMAT_NCHW_VECT_C) {
+      TF_RETURN_IF_ERROR(context->Multiply(
+          *filter_dim,
+          context->Dim(shape, GetTensorInnerFeatureDimIndex(rank, format)),
+          filter_dim));
+    }
+    return Status::OK();
+  }
+
+  Status ShapeFromDimensions(DimensionHandle batch_dim,
+                            gtl::ArraySlice<DimensionHandle> spatial_dims,
+                            DimensionHandle filter_dim, TensorFormat format,
+                            InferenceContext* context, ShapeHandle* shape) {
+    const int32 rank = GetTensorDimsFromSpatialDims(spatial_dims.size(), format);
+    std::vector<DimensionHandle> out_dims(rank);
+
+    // Batch.
+    out_dims[tensorflow::GetTensorBatchDimIndex(rank, format)] = batch_dim;
+    // Spatial.
+    for (uint spatial_dim_index = 0; spatial_dim_index < spatial_dims.size();
+        ++spatial_dim_index) {
+      out_dims[tensorflow::GetTensorSpatialDimIndex(
+          rank, format, spatial_dim_index)] = spatial_dims[spatial_dim_index];
+    }
+    // Channel.
+    if (format == tensorflow::FORMAT_NCHW_VECT_C) {
+      // When format is NCHW_VECT_C, factor the feature map count
+      // into the outer feature count and the inner feature count (=4).
+      TF_RETURN_IF_ERROR(context->Divide(
+          filter_dim, 4, /*evenly_divisible=*/true,
+          &out_dims[tensorflow::GetTensorFeatureDimIndex(rank, format)]));
+      out_dims[GetTensorInnerFeatureDimIndex(rank, format)] = context->MakeDim(4);
+    } else {
+      out_dims[tensorflow::GetTensorFeatureDimIndex(rank, format)] = filter_dim;
+    }
+
+    *shape = context->MakeShape(out_dims);
+    return tensorflow::Status::OK();
+  }
+
+}