Merge pull request #55 from Techainer/no_proto
Remove gRPC (no longer used) and bump versions of some requirements
duonglong289 committed Jun 9, 2022
2 parents 69eb140 + f99a81a commit 37e6953
Showing 23 changed files with 27 additions and 805 deletions.
4 changes: 2 additions & 2 deletions docs/3_Concepts/1_Server.md
@@ -72,7 +72,7 @@ name: mlchain-server # name of service
entry_file: server.py # python file contains object ServeModel
host: localhost # host service
port: 5000 # port service
-server: flask # server option flask or grpc
+server: flask # server option flask or starlette
wrapper: gunicorn # wrapper option None or gunicorn
gunicorn: # config gunicorn wrapper
timeout: 60 # max time limit for the server to process
@@ -107,7 +107,7 @@ Port to serve on.
#### server:
```--server STRING```

-Type of server to run. Currently we support flask or grpc.
+Type of server to run. Currently we support flask or starlette.

#### wrapper:
```--wrapper STRING```
2 changes: 1 addition & 1 deletion docs/4_Tutorials/2_Keras.md
@@ -275,7 +275,7 @@ name: Fashion-MNIST classifier # name of service
entry_file: main.py # python file contains object ServeModel
host: localhost # host service
port: 5000 # port service
-server: flask # option flask or starlette or grpc
+server: flask # option flask or starlette
wrapper: None # option None or gunicorn
cors: true
dump_request: None # None or path folder log request
2 changes: 1 addition & 1 deletion docs/Model Deployment/mlconfig.md
@@ -22,7 +22,7 @@ name: mlchain-server # name of service
entry_file: server.py # python file contains object ServeModel
host: localhost # host service
port: 5000 # port service
-server: flask # option flask or grpc
+server: flask # option flask or starlette
wrapper: gunicorn # option None or gunicorn
gunicorn: # config apm-server if uses gunicorn wrapper
timeout: 60
2 changes: 1 addition & 1 deletion docs/Model Deployment/tutorial.md
@@ -168,7 +168,7 @@ name: Digit-Recognizer # name of service
entry_file: app.py # python file contains object ServeModel
host: localhost # host service
port: 5000 # port
-server: flask # option flask or grpc
+server: flask # option flask or starlette
wrapper: None # option None or gunicorn
cors: true
gunicorn: # config apm-server if uses gunicorn wrapper
2 changes: 1 addition & 1 deletion docs/commands/init.md
@@ -28,7 +28,7 @@ name: mlchain-server # name of service
entry_file: server.py # python file contains object ServeModel
host: localhost # host service
port: 2222 # port service
-server: flask # option flask or starlette or grpc
+server: flask # option flask or starlette
trace: False # option True or False
queue: None # option None or rabbit or redis
wrapper: None # option None or gunicorn
2 changes: 1 addition & 1 deletion docs/getstarted/core_concepts.md
@@ -15,7 +15,7 @@ real life example using our service.
## ML Deployment
Simple Machine Learning model deployment is the central feature of ML Chain library.
Our ServeModel function allows user to deploy their model without requiring software engineering knowledge.
-We support Flask and grpc for website hosting.
+We support Flask and starlette for website hosting.

[Read More...](../Model Deployment/general.md)

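Since this is the only prose description of the deployment flow touched by the commit, here is a minimal sketch of what an entry file looks like, assuming mlchain's documented `ServeModel` API (the model class and its method are illustrative, not taken from this diff):

```python
# server.py - a hypothetical entry_file for `mlchain run`
from mlchain.base import ServeModel

class Translator:
    def predict(self, text: str) -> str:
        # placeholder model logic; a real model would run inference here
        return text.upper()

# `mlchain run` looks for a ServeModel object in the configured entry_file
serve_model = ServeModel(Translator())
```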
2 changes: 1 addition & 1 deletion mlchain/__init__.py
@@ -7,7 +7,7 @@
)

# Parameters of MLchain
-__version__ = "0.2.8"
+__version__ = "0.2.9"

HOST = "https://www.api.mlchain.ml"
WEB_HOST = HOST
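A trivial runtime check of the version bump, assuming the package is installed from this commit:

```python
import mlchain

# Prints "0.2.9" after this change
print(mlchain.__version__)
```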
1 change: 0 additions & 1 deletion mlchain/cli/main.py
@@ -5,7 +5,6 @@
import click
import flask
import starlette
-import grpc
from .init import init_command
from .run import run_command
from .artifact import artifact_command
2 changes: 1 addition & 1 deletion mlchain/cli/mlconfig.yaml
@@ -8,7 +8,7 @@ host: 0.0.0.0 # Host of service
port: 8001 # Port service

# Server config
-server: flask # Option flask or starlette or grpc
+server: flask # Option flask or starlette
wrapper: gunicorn # Option None or gunicorn
cors: true # Auto enable CORS
cors_allow_origins: # Allow origins for CORS
31 changes: 2 additions & 29 deletions mlchain/cli/run.py
@@ -65,7 +65,6 @@ def get_env(_k):
op_gunicorn = click.option("--gunicorn", "wrapper", flag_value="gunicorn", help="Run server with gunicorn or not")
op_flask = click.option("--flask", "server", flag_value="flask", help="Run with Flask server")
op_starlette = click.option("--starlette", "server", flag_value="starlette", help="Run with Starlette server")
-op_grpc = click.option("--grpc", "server", flag_value="grpc", help="Run with gRPC server")
op_worker = click.option("--workers", "-w", "workers", default=None, type=int, help="Number of workers")
op_thread = click.option("--threads", "-t", "threads", default=None, type=int, help="Number of threads")
op_mode = click.option("--mode", "-m", "mode", default=None, type=str, help="The mode of mlconfig")
@@ -84,7 +83,6 @@ def get_env(_k):
@op_gunicorn
@op_flask
@op_starlette
-@op_grpc
@op_worker
@op_thread
@op_config
@@ -280,25 +278,7 @@ def run_command(
logger.info("Ngrok url: {0}".format(endpoint))
os.environ["NGROK_URL"] = endpoint


-    ############
-    # Run with grpc
-    ############
-    if server == "grpc":
-        from mlchain.server.grpc_server import GrpcServer
-
-        app = get_model(entry_file, serve_model=True)
-
-        if app is None:
-            raise Exception(
-                "Can not init model class from {0}. Please check mlconfig.yaml or {0} or mlchain run -m {{mode}}!".format(
-                    entry_file
-                )
-            )
-
-        app = GrpcServer(app, name=name)
-        app.run(host, port)
-    elif wrapper == "gunicorn":
+    if wrapper == "gunicorn":
############
# Run with gunicorn
############
@@ -482,7 +462,7 @@ def load(self):
elif app.__class__.__name__ == "GrpcServer":
app.run(host, port, debug=debug)
elif isinstance(app, ServeModel):
-if server not in ["starlette", "grpc"]:
+if server != "starlette":
server = "flask"
if server == "flask":
from mlchain.server.flask_server import FlaskServer
@@ -532,13 +512,6 @@ def load(self):
debug=debug
)

-elif server == "grpc":
-    from mlchain.server.grpc_server import GrpcServer
-
-    app = GrpcServer(app, name=name)
-    app.run(host, port)
-

def get_model(module, serve_model=False):
"""
Get the serve_model from entry_file
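With the gRPC branch removed, the default code path reduces to wrapping the `ServeModel` in a `FlaskServer`. A condensed sketch, assuming the import path and call signatures visible in this diff (the service name, host, port, and model object are illustrative):

```python
from mlchain.base import ServeModel
from mlchain.server.flask_server import FlaskServer

class EchoModel:
    def predict(self, text: str) -> str:
        return text

# Roughly what `mlchain run` does with server: flask and wrapper: None
app = FlaskServer(ServeModel(EchoModel()), name="mlchain-server")
app.run("localhost", 5000, debug=False)
```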
33 changes: 8 additions & 25 deletions mlchain/client/__init__.py
@@ -1,40 +1,23 @@
from mlchain import mlconfig
from mlchain.base import logger
-from .grpc_client import GrpcClient
from .http_client import HttpClient


-class Client(HttpClient, GrpcClient):
-    def __init__(self, api_key=None, api_address=None, serializer='json', timeout=5 * 60, headers={}, type='http',
+class Client(HttpClient):
+    def __init__(self, api_key=None, api_address=None, serializer='json', timeout=5 * 60, headers={},
                  name: str = "", version: str = "", check_status=False):
-        assert isinstance(type, str), "type model must be a string"
         self._api_key = api_key
         self._api_address = api_address
         self._serializer = serializer
         self._timeout = timeout
         self._headers = headers
-        self._type = type
-        if self._type.lower() == 'http':
-            HttpClient.__init__(self, api_key=api_key, api_address=api_address, serializer=serializer,
-                                timeout=timeout, headers=headers, name=name, version=version,
-                                check_status=check_status)
-        elif self._type.lower() == 'grpc':
-            GrpcClient.__init__(self, api_key=api_key, api_address=api_address, serializer=serializer,
-                                timeout=timeout, headers=headers, name=name, version=version,
-                                check_status=check_status)
-        else:
-            raise Exception("type must be http or grpc")
+        HttpClient.__init__(self, api_key=api_key, api_address=api_address, serializer=serializer,
+                            timeout=timeout, headers=headers, name=name, version=version,
+                            check_status=check_status)

     def model(self, name: str = "", version: str = "", check_status=False):
-        if self._type.lower() == 'http':
-            return HttpClient(api_key=self._api_key, api_address=self._api_address, serializer=self._serializer,
-                              timeout=self._timeout, headers=self._headers, name=name, version=version,
-                              check_status=check_status)
-        if self._type.lower() == 'grpc':
-            return GrpcClient(api_key=self._api_key, api_address=self._api_address, serializer=self._serializer,
-                              timeout=self._timeout, headers=self._headers, name=name, version=version,
-                              check_status=check_status)
-
         return HttpClient(api_key=self._api_key, api_address=self._api_address, serializer=self._serializer,
                           timeout=self._timeout, headers=self._headers, name=name, version=version,
                           check_status=check_status)

def get_model(name):
config = mlconfig.get_client_config(name)
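A short usage sketch of the simplified HTTP-only client; the constructor and `model()` signatures come from the code above, while the address and the remote function name are hypothetical:

```python
from mlchain.client import Client

client = Client(api_address="http://localhost:5000", serializer="json")
model = client.model(check_status=False)

# Functions exposed by the remote ServeModel are called by name, e.g.:
# result = model.predict("some input")
```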
49 changes: 0 additions & 49 deletions mlchain/client/grpc_client.py

This file was deleted.

2 changes: 0 additions & 2 deletions mlchain/config.py
@@ -6,7 +6,6 @@
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
import datetime
-from mlchain.utils.system_info import get_gpu_statistics

class BaseConfig(dict):
def __init__(self, env_key='', **kwargs):
@@ -162,7 +161,6 @@ def before_send(event, hint):
if mlconfig.MLCHAIN_SENTRY_DROP_MODULES:
event['modules'] = {}

-event['extra']["gpuinfo"] = get_gpu_statistics()
return event

def init_sentry():
6 changes: 0 additions & 6 deletions mlchain/server/__init__.py
@@ -11,9 +11,3 @@
except Exception as ex: # pragma: no cover
import warnings
warnings.warn("Can't import StarletteServer. {0}".format(ex))

-try:
-    from .grpc_server import GrpcServer
-except Exception as ex:  # pragma: no cover
-    import warnings
-    warnings.warn("Can't import GrpcServer. {0}".format(ex))
86 changes: 0 additions & 86 deletions mlchain/server/grpc_server.py

This file was deleted.

Empty file removed mlchain/server/protos/__init__.py
