Python / Flask — posted 2019-12-05 11:30
import matplotlib


from multiprocessing import Pool
from pymatgen import Composition, Element
from pymatgen.analysis.phase_diagram import PhaseDiagram, PDEntry, PDPlotter
#from IPython.display import display
from flask import Flask, render_template, request, jsonify
from flask_caching import Cache
import os, time, datetime
from functools import partial
from itertools import repeat
import re

# Flask application plus flask-caching configuration.
# NOTE(review): a Cache object is never instantiated in this view
# (flask_caching.Cache is imported but unused) — confirm whether
# caching was wired up elsewhere or simply left unfinished.
app = Flask(__name__)
app.config.update(
    CACHE_TYPE="memcached",
    CACHE_MEMCACHED_SERVER=['localhost'],
)


def querying2(a, b):
    """Filter one OQMD CSV row against a target chemical system.

    Args:
        a: CSV row as a list — [formula, attribute/id, formation_energy].
        b: target composition formula string (the chemical system of interest).

    Returns:
        A one-element list containing a PDEntry built from the row when the
        row's elements form a subset of the target composition's elements,
        otherwise None.  (The list wrapper is kept because the caller
        indexes the result with ``[0]``.)
    """
    target_elements = {str(el) for el in Composition(b).elements}
    row_elements = {str(el) for el in Composition(a[0]).elements}
    # Subset test replaces the original length comparison + intersection
    # dance; Composition.elements yields unique elements, so the extra
    # len() guard in the original was redundant.
    if row_elements <= target_elements:
        return [PDEntry(a[0], float(a[2]), attribute=a[1])]
    return None

@app.route('/pd/<string:composition>/', methods=['GET'])
def new_pd(composition='Li7La3Zr2O12', num_proc=4):
    entries = []
    #composition=composition.replace("-", "").strip()
    composition=re.sub(r'\W+', '', composition)
    elements = [str(e) for e in Composition(composition).elements]
    data = [line.strip().split(', ') for line in open('oqmd-compounds.csv', 'r')]

    with Pool(processes=num_proc) as pool:, b=composition), data)

    for a in ent:
        if a != None: entries.append(a[0])

    pd = PhaseDiagram(entries)

    timestamp ="%Y-%m-%d-%H-%M-%S.%f")

    result={'timestamp':timestamp, 'composition':composition, 'pd_filename':pd_filename}

    return jsonify(result)

#new_pd('Li7La3Zr2O12', 4)

if __name__ == '__main__':'', port=7000, debug=True, threaded=True)

related link:


Flask: Multi-Processing in Flask - Glitchdata

Use the multiprocessing module as a task queue, and overcome the GIL in Python. When you have computationally intensive tasks in your website (or scripts), it is conventional to use a task queue such as Celery. However, using Celery requires some amount of setup, and if you only need something simple, the multiprocessing module can serve as a lightweight alternative.


posted by kimsooil