Friday 20 December 2019

Profiling a dataframe

by hand

# ==> Analysis
df1.info()
df1.nunique()
print(df1.describe())


# rows where VALID_BSN is missing or '0'
df1[df1['VALID_BSN'].isnull()]
df1[df1['VALID_BSN']=='0'].info()


# rows where BSN is missing or set to the dummy value
df1[df1['BSN'].isnull()]
df1[df1['BSN']=='999999999']


duplicate records
df1[df1.duplicated(subset=None)]





# xls_file is a pd.ExcelFile object opened earlier
df1=xls_file.parse(0,skiprows=0,dtype=str)

df1=df1.astype({'VD_Ingangsdatum': 'datetime64', 'VD_Einddatum': 'datetime64'})

dictVeldnamen={'Valid_BSN':'VALID_BSN','LR_BSN': 'BSN', 'VD_Ingangsdatum': 'BEGIN_DATUM','VD_Einddatum':'EIND_DATUM','Code_Bron': 'CODE_BRON'}

df1.rename(columns=dictVeldnamen,inplace=True)

df1.drop_duplicates(subset=None, keep='last', inplace=True)


alternative: pandas-profiling

# importing required packages
import pandas as pd
import pandas_profiling
import numpy as np

# importing the data
df = pd.read_csv('/Users/lukas/Downloads/titanic/train.csv')

pandas_profiling.ProfileReport(df)
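In a notebook the ProfileReport renders inline; to keep the result, you can also write it to a standalone HTML file (a minimal sketch; to_file exists in pandas-profiling 1.x and 2.x, the argument name differs slightly per version):

profile = pandas_profiling.ProfileReport(df)
profile.to_file('report.html')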

How can you easily group a dataframe and perform operations per group?



# apply a function per group; x.name contains the group key
def func1(name,group):
    print(group)

df1.groupby(['BSN']).apply(lambda x: func1(x.name,x))

# with multiple grouping columns, x.name is a tuple of key values
def func2(name,group):
    print(name)

df1.groupby(['BSN','CODE_VOORZIENING']).apply(lambda x: func2(x.name,x))
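If you want a per-group result back as a dataframe instead of printing it, aggregate instead. A minimal sketch, reusing the column names from this section:

# count rows and distinct CODE_BRON values per BSN
df1.groupby('BSN').agg({'CODE_BRON': ['count', 'nunique']})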


example

def checkOverlapPerGroepering(tgroep,dfx):
    global myresult
    # turn each row's myrange into a set
    dfx['myrange']=dfx.apply(lambda x: set(x['myrange']),axis=1)
    # ==> determine whether the myrange records of the bsn intersect
    # note: only consecutive rows are compared, so overlapping ranges are
    # assumed to be adjacent within the group
    geenoverlap=True
    for i in range(len(dfx)-1):
        geenoverlap=dfx['myrange'].iloc[i].isdisjoint(dfx['myrange'].iloc[i+1]) & geenoverlap
    trecord={'groep':str(tgroep),'geenoverlap': geenoverlap}
    myresult=myresult.append(trecord,ignore_index=True)

>>>>> main 2>>>>>

myresult = pd.DataFrame(columns =['groep', 'geenoverlap'])
df1.groupby(['BSN']).apply(lambda x: checkOverlapPerGroepering(x.name,x))
df1.groupby(['BSN','CODE_VOORZIENING']).apply(lambda x: checkOverlapPerGroepering(x.name,x))

date/datetime handy tricks


how do you convert a string to a date

  • convert the string to a datetime with strptime()
  • convert the datetime to a date

dt.datetime.strptime('2016-07-15 00:00:00', '%Y-%m-%d %H:%M:%S').date()



How do you convert a date to a string

date_time = now.strftime("%m/%d/%Y, %H:%M:%S")   # now is a datetime object
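A self-contained sketch of both directions:

import datetime as dt

# string -> date
d = dt.datetime.strptime('2016-07-15 00:00:00', '%Y-%m-%d %H:%M:%S').date()

# datetime -> string
now = dt.datetime.now()
date_time = now.strftime("%m/%d/%Y, %H:%M:%S")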

how do you build a list of dates based on a start and end date

Use the pd.date_range function

example:
spelingrechts=7
spelinglinks=7

# generate, per row, a DatetimeIndex with all dates between BEGIN_DATUM and EIND_DATUM, shrunk by the margins
df1['myrange']=df1.apply(lambda x: pd.date_range(x['BEGIN_DATUM']+timedelta(days=spelinglinks),x['EIND_DATUM']-timedelta(days=spelingrechts), freq='D'),axis=1)




Convert Datetimeindex to Index using specified date_format.

df1['myrange']=df1['myrange'].map(lambda x: x.strftime("%Y/%m/%d"))

Example: how do you calculate costs per day based on a start and end date

# -*- coding: utf-8 -*-
"""
Created on Thu Dec 19 13:47:28 2019

@author: wagene002
"""

#https://stackoverflow.com/questions/43832484/expanding-a-dataframe-based-on-start-and-end-columns-speed
import numpy as np
import pandas as pd
from datetime import timedelta
import datetime as dt


df = pd.DataFrame()
df['BEGIN_DATUM'] = [dt.date(2010, 12, 31), dt.date(2013, 4, 5)]   # use dt.date consistently; mixing dt.datetime and dt.date breaks the date arithmetic below
df['EIND_DATUM'] = [dt.date(2011, 1, 3), dt.date(2016, 12, 12)]
df['country'] = ['US', 'EU']
df['letter'] = ['a', 'b']
df['bedrag'] = ['1200.0','44']
df['bedrag']= df['bedrag'].apply(lambda x : float(x))

df['diffdagen'] =((df['EIND_DATUM']-df['BEGIN_DATUM']) / np.timedelta64(1, 'D')).astype('int') + 1
# NOTE, alternative way (this one omits the +1):
# df['diffdagen'] = df.apply(lambda x: (x['EIND_DATUM']-x['BEGIN_DATUM']).days,axis=1)

df['dagbedrag'] = df['bedrag']/  df['diffdagen']


cols = list(df.columns)

# assign each row a data id
df['data_id'] = np.arange(0, len(df))
df['BEGIN_JAAR']=df['BEGIN_DATUM'].map(lambda x:x.year)
df['EIND_JAAR']=df['EIND_DATUM'].map(lambda x:x.year)

# put the columns into separate series
data_id = df['data_id']
start1 = df['BEGIN_JAAR']
end1 = df['EIND_JAAR']

#diff = ((end-start) / np.timedelta64(1, 'Y')).astype('int') + 1
# determine how many years occur in a row
diff = (end1-start1) + 1
# repeat each data_id once per year found
repeated_id = np.repeat(data_id, diff)

time_df = pd.DataFrame(data={'data_id': repeated_id})
time_df = pd.merge(left=time_df, right=df[['data_id', 'BEGIN_JAAR','EIND_JAAR']], on=['data_id'])


time_df['year_id'] = np.arange(0, len(time_df))

min_year_id = time_df.groupby('data_id')['year_id'].min().reset_index().rename(columns={'year_id': 'min_year_id'})
time_df = pd.merge(left=time_df, right=min_year_id, on=['data_id'])
#years_to_add = (time_df['year_id'] - time_df['min_year_id']) * np.timedelta64(1, 'Y')
years_to_add = (time_df['year_id'] - time_df['min_year_id'])
time_df['JAARTAL'] = time_df['BEGIN_JAAR'] + years_to_add

#time_df = time_df[time_df['JAARTAL'].dt.dayofweek < 5]

dfuit = pd.merge(left=df, right=time_df[['data_id', 'JAARTAL']], on=['data_id'])


def fBepaalAantalDagen(tseries):
    ndagen=0
    year_start=dt.date(tseries['JAARTAL'], 1, 1)
    year_end=dt.date(tseries['JAARTAL'], 12, 31)
    # the start date lies in this year
    if tseries['BEGIN_JAAR']== tseries['JAARTAL'] :
        # starts in this year and the end date lies in a later year
        if tseries['EIND_JAAR'] > tseries['JAARTAL'] :
            # count only the days from the start date up to 31-12
            ndagen=  ((year_end-tseries['BEGIN_DATUM']).days + 1)
        # start and end lie in the same year
        elif tseries['EIND_JAAR'] == tseries['JAARTAL']:
            ndagen=  ((tseries['EIND_DATUM']-tseries['BEGIN_DATUM']).days + 1)
    # the start date lies before this year
    if (tseries['BEGIN_JAAR'] < tseries['JAARTAL']) & (tseries['EIND_JAAR'] >=  tseries['JAARTAL']) :
        # start lies before this year and the end lies after it
        if tseries['EIND_JAAR'] > tseries['JAARTAL'] :
            # count the whole year
            ndagen= ((year_end-year_start).days + 1)
        # start lies before this year and the end lies within it
        elif tseries['EIND_JAAR'] == tseries['JAARTAL']:
            # count the days from 1-1 up to and including the end date
            ndagen=  ((tseries['EIND_DATUM']-year_start).days + 1)
    return ndagen


dfuit['aantal_dag_in_jaar']=dfuit.apply(lambda x: fBepaalAantalDagen(x),axis=1)




dfuit = dfuit[['JAARTAL']+cols]


Friday 15 November 2019

Freeze Python executable


pyinstaller entry_point.py -n foobar --onefile
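The frozen executable ends up in the dist/ directory; intermediate build artifacts go to build/.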

https://hackernoon.com/the-one-stop-guide-to-easy-cross-platform-python-freezing-part-1-c53e66556a0a

Friday 8 November 2019

Ubuntu 18.04 installing with conda


Python3

By default, python is Python 2. To make Python 3 the default, add the following to /etc/bash.bashrc:

alias python=python3
alias pip=pip3


install miniconda

wget https://repo.anaconda.com/miniconda/Miniconda2-latest-Linux-x86_64.sh
sh Miniconda2-latest-Linux-x86_64.sh
 ?> Do you wish the installer to initialize Miniconda2 in your /root/.bashrc ? 
===> YES


Tuesday 15 October 2019

pydoc


running mode 1
 python -m pydoc math


running mode 2

python -m pydoc -b

start a documentation web server
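running mode 3

python -m pydoc -w math

writes the documentation to an HTML file (math.html) in the current directory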

Wednesday 9 October 2019

DASH: CHOROPLETH MAPS

# -*- coding: utf-8 -*-
"""
Created on Tue Oct  8 10:28:13 2019

@author: wagenerj
"""

import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output

import geopandas as gpd
import pandas as pd
import matplotlib.pyplot as plt
import random

import mymod
import chart_studio.plotly as py
import plotly.graph_objs as go



counties = mymod.geefGebiedenGJSON()
df= mymod.geefGebiedenGDF()

app = dash.Dash(__name__)

#working block, everything in one command
# =============================================================================
# mygraph= dcc.Graph(
#             figure = go.Figure(go.Choroplethmapbox(geojson=counties, locations=df.myindex, z=df.myval,
#                                         colorscale="Viridis", zmin=0, zmax=12,
#                                         marker_opacity=0.5, marker_line_width=0)
#    
#             ,layout=go.Layout(
#                 title='US Export of Plastic Scrap',
#                 showlegend=True,
#                 legend=go.layout.Legend(x=0, y=1.0),
#                 margin=go.layout.Margin(l=40, r=0, t=40, b=30)
#              )
#             ),style={'height': 300},id='my-graph') 
#
# app.layout=html.Div(mygraph)
#
# =============================================================================

# =============================================================================
# myfigure=go.Figure(go.Choroplethmapbox(geojson=counties, locations=df.myindex, z=df.myval,
#                                         colorscale="Viridis", zmin=0, zmax=6,
#                                         marker_opacity=0.5, marker_line_width=0)
#    
#             ,layout=go.Layout(
#                 title='Gebieden Amsterdam values ',
#                 showlegend=True,
#                 legend=go.layout.Legend(x=0, y=1.0),
#                 margin=go.layout.Margin(l=40, r=0, t=40, b=30)
#              ))
#
#
# myfigure.update_layout(mapbox_style="open-street-map", mapbox_zoom=3, mapbox_center = {"lat": 37.0902, "lon": -95.7129})
#            
# mygraph= dcc.Graph(figure=myfigure,style={'height': 600},id='graf1') 
#
# app.layout=html.Div([mygraph])
#
# =============================================================================
def makeChloromap() :
    myChloromapbox =go.Choroplethmapbox(geojson=counties, locations=df.myindex, z=df.myval, hovertext="ok",
                                            colorscale="Blues", zmin=0, zmax=6,
                                            marker_opacity=1, marker_line_width=0)
    myLayout=go.Layout(
                    title='Gebieden Amsterdam values ',
                    showlegend=True,
                    legend=go.layout.Legend(x=0, y=1.0),
                    margin=go.layout.Margin(l=40, r=0, t=40, b=30)
                 )
   
   
    myfigure=go.Figure(myChloromapbox, myLayout)
   
   
    myfigure.update_layout(mapbox_style="open-street-map", mapbox_zoom=10, mapbox_center = {"lat": 52.370216, "lon": 4.895168})
             
    mygraph= dcc.Graph(figure=myfigure,style={'height': 600,'width':900},id='graf1') 
    return mygraph

def makeChloro():
    # note: go.Choropleth takes no data= argument; the dataframe columns are passed directly
    myfigure = go.Figure(data=go.Choropleth(
        locations=df['myindex'], # Spatial coordinates
        z = df['myval'], # Data to be color-coded
        locationmode = 'country names', # set of locations match entries in `locations`
        colorscale = 'Reds',
        colorbar_title = "Millions USD",
    ))
#    myfigure.update_layout(mapbox_style="open-street-map", mapbox_zoom=10, mapbox_center = {"lat": 52.370216, "lon": 4.895168})
    mygraph= dcc.Graph(figure=myfigure,style={'height': 600,'width':900},id='graf2') 
    return mygraph


app.layout=html.Div([makeChloro()])




if __name__ == '__main__':
    app.run_server(debug=True)

Friday 4 October 2019

uWSGI Installation


Deployment of a Flask application for public access requires a real webserver be installed in front of the application. That, in turn, requires a mechanism for linking the webserver to the Flask application.
In this post, we'll install the uWSGI package and use it to link a rudimentary Flask application to an Nginx server. As part of the uWSGI installation, we'll set up a Python virtual environment to house both the Flask application and the uWSGI package.
In order to follow this post, you should have a recent version of Python and Nginx (see sidebar).

uWSGI Installation

NOTE: As of January 28, 2019, these same instructions work for installing uWSGI version 2.0.17.1.
We used my Antsle to create an Ubuntu 16.04 installation (named "helium") where we have installed recent releases of Python and Nginx (see sidebar).
We're going to install uWSGI in a Python virtual environment, so first we set up a new virtual environment and activate it. NOTE: You can, of course, set up your virtual environment anywhere you have rights, but my common practice is to put them all in my home account under a single "venvs" directory.
$ cd                       # /home/joe is the working directory
$ mkdir venvs              # directory for virtual environments
$ cd venvs                 # 
$ python3 -m venv sam      # make sam virtual environment
$ source sam/bin/activate  # activate the virtual environment
Once the virtual environment is activated, the system prompt is prefixed with the name of the virtual environment - (sam). Check the Python version. Next we use which -a to check for the multiple python3 installations: Ubuntu distro, our Python 3.7.0 installation, and the python3 in the (sam) virtual environment.
(sam) joe@helium:~/venvs$ python --version
Python 3.7.0

(sam) joe@helium:~/venvs$ which -a python3
/home/joe/venvs/sam/bin/python3
/usr/local/bin/python3
/usr/bin/python3
With the virtual environment in place, we can install Flask and uWSGI. NOTE: The uWSGI install will fail unless you have a complete Python installation. In particular, you need to have the python3-dev package and the libssl-dev package installed. (See this post.)
Both Flask and uWSGI are installed in the virtual environment. Check the versions.
(sam) joe@helium:~/alex$ pip install flask
(sam) joe@helium:~/alex$ pip install uwsgi


(sam) joe@helium:~/venvs$ which flask
/home/joe/venvs/sam/bin/flask

(sam) joe@helium:~/venvs$ flask --version
Flask 1.0.2
Python 3.7.0 (default, Oct 19 2018, 14:09:51)
[GCC 5.4.0 20160609]

(sam) joe@helium:~/venvs$ which uwsgi
/home/joe/venvs/sam/bin/uwsgi

(sam) joe@helium:~/venvs$ uwsgi --version
2.0.17.1
Installations Complete: Here's the Plan
Now everything we need is installed: Python, Nginx, Flask and uWSGI. This is what we are going to do:
  1. We set up and run a simple Flask application without using either Nginx or uWSGI. Test the application using curl.
  2. We hook up our rudimentary Flask application to the Nginx server by using a uWSGI configuration file.
  3. As a bonus, we set up a second virtual environment ("western") with all the trimmings - uWSGI, Flask application, Nginx service, etc. - and run both applications at the same time.

Rudimentary Flask Application

There are many different ways to structure a Flask application. We'll use the following directory structure.
/home/joe
    |
    |-- /alex       (project directory)
        |
        | -- /paa   (application directory)
        .    |
        .    | -- /static  (css, js, etc.)
        .    | -- /templates  (html files)
        .    |
        .    | -- __init__.py
        .    | -- routes.py
        .    | :
        .    | :
        |
        | -- config.py
        | -- run_me.py
        | :
        | :
This structure is a little overblown for the rudimentary Flask application we are going to build, but it is illustrative of the setup for a simple application (meaning no Blueprints). Also, we have made a few files and directories with artificially distinct names so that dependencies are a little clearer than in most tutorials.
File: __init__.py (shown below)
from flask import Flask

ned = Flask(__name__)
ned.config.from_object('config')
NOTE: Only the root name of the config.py file (shown below) is used in the ned.config.from_object() call.
File: routes.py (shown below)
from paa import ned

@ned.route('/')
def slash():
    title = "<title>Ned's Greeting</title>"
    greeting = '<span style="color: blue"><b>Howdy Doodly, Neighbor!'
    return f'{title}\n{greeting}\n'
NOTE: Our default practice is to use single quotes for strings, but the apostrophe in "Ned's Greeting" necessitates double quotes for the title value.
File: run_me.py (shown below)
from paa import ned
from paa import routes

if __name__ == '__main__':
    ned.run()
NOTE: Flask defaults to serving the application at address localhost:5000 unless some other host and port are specified in the "ned.run()" statement. As you'll see later, there is no need for those parameters when using uWSGI and Nginx in this example.
File: config.py (shown below)
SECRET_KEY = 'ivegotasecret'
We've included a Flask SECRET_KEY configuration value here for the sake of completeness. It is not necessary in the example shown above. In any real Flask application (using Nginx and uWSGI), though, you're going to use the "session" object at some point, and you cannot do that unless you have set the SECRET_KEY configuration value. See this StackOverflow question/answer for a full description of the Flask "session" object.
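For completeness, a minimal sketch of what using the session object looks like once SECRET_KEY is set (the route and key names are just for illustration):

from flask import session

@ned.route('/visit')
def visit():
    session['visited'] = True   # stored in a cryptographically signed cookie; requires SECRET_KEY
    return 'session updated\n'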

Test Flask App in Flask Development Environment

In order to test the Flask application using the curl command, we need to have two terminal sessions - one to execute the Flask app and one to execute the curl command.
To run the Flask application, set up in the .../alex directory:
(sam) joe@helium:~/alex$ export FLASK_APP=run_me.py
(sam) joe@helium:~/alex$ flask run
 * Serving Flask app "run_me.py"
 * Environment: production
   WARNING: Do not use the development server in a production environment.
   Use a production WSGI server instead.
 * Debug mode: off
 * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
.
.
.
[ctl-C to end the application after the curl test]
In another terminal window, enter the curl command:
joe@helium:~$ curl localhost:5000
<title>Ned's Greeting</title>
<span style="color: blue"><b>Howdy Doodly, Neighbor!
Now we leave this minimally operational Flask application on the shelf while we put Nginx and uWSGI together.

Set uWSGI Parameters to Work with Flask Application

The uwsgi.ini file sets values for all of the parameters required to link a Python-Flask application to the Nginx server.
The module parameter identifies the Python file to run (run_me.py) and the Flask application object (ned).
The master parameter is a standard setting for production environments.
The processes parameter is commonly set to 5 as a default. To get an optimal setting requires experimentation with the application under load.
The socket parameter provides the name of the socket connection between uWSGI and Nginx. Note that the socket value is also identified in the Nginx configuration file. These have to match in order for uWSGI to link correctly with Nginx - a common mechanism for coordinating application elements.
The chmod-socket parameter is supposed to provide the "uWSGI user" access to the socket. The value 664 is specified in the documentation, but it did not work for us, so we show it here as 666, which did work for us.
The vacuum parameter directs uWSGI to delete the Unix socket when the uWSGI server terminates.
The uid and gid parameters identify the user and group running the uWSGI server.
The die-on-term parameter directs uWSGI to "brutally reload all the workers and the master process" when the application exits.
The uwsgi.ini parameter file is shown below:
[uwsgi]
module=run_me:ned

master = true
processes = 5

socket = baker.sock
chmod-socket = 666  
vacuum = true

uid = joe
gid = www-data

die-on-term = true

Setup Nginx to Work With uWSGI Parameters

The Nginx configuration file shown below links up with the uWSGI server via the include and uwsgi_pass parameters.
/etc/nginx/conf.d/helium.conf
server {
    listen 8181;
    server_name localhost;

    location / {
        include   uwsgi_params;
        uwsgi_pass  unix:/home/joe/alex/baker.sock;
    }
}
The /etc/nginx/conf.d/helium.conf file is included in the last line of the standard "starter" configuration file that comes with installation of the Nginx server from the Nginx.org site.
user  nginx;
worker_processes  1;

error_log  /var/log/nginx/error.log warn;
pid        /var/run/nginx.pid;


events {
    worker_connections  1024;
}


http {
    include       /etc/nginx/mime.types;
    default_type  application/octet-stream;

    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  /var/log/nginx/access.log  main;

    sendfile        on;
    #tcp_nopush     on;

    keepalive_timeout  65;

    #gzip  on;

    include /etc/nginx/conf.d/helium.conf;
}

Run Nginx and uWSGI Servers

To restart the Nginx server:
$ sudo service nginx restart
In order to start uWSGI properly for the .../alex/paa application, you have to activate the sam virtual environment, then run the uWSGI server.
The --ini uwsgi.ini parameter directs uWSGI to our .ini file.
The --daemonize uwsgi.log parameter runs this instance of the uWSGI server as a daemon process and directs the server to write its log output to uwsgi.log.
The --safe-pidfile /tmp/alex.pid parameter saves the pid for this uWSGI server process to the file "/tmp/alex.pid". That pid number is referenced to "gracefully" reload the uWSGI server or to stop it. (See this.)
(sam) joe@helium:~/alex$ uwsgi --ini uwsgi.ini --daemonize uwsgi.log --safe-pidfile /tmp/alex.pid
And here's the greeting displayed from the command line with "curl localhost:8181":
(sam) joe@helium:~$ curl localhost:8181
<title>Ned's Greeting</title>
<span style="color: blue"><b>Howdy Doodly, Neighbor!</b></span>
There we have it: the Flask application is joined to Nginx via uWSGI.

Twice the Fun

But we could not resist checking this out.
One immediate implication of installing uWSGI into a Python virtual environment is that we should be able to run multiple Flask applications from separate virtual environments - each through its own uWSGI connection to Nginx. And it turns out that we can.
We're just going to sketch this implementation because it is so straightforward. We created a second virtual environment named "western" and installed both Flask and uWSGI as before. We made a project directory named "scifi" and a "noir" directory for the Flask application. The listings below show the substitutions in the western/scifi/noir setup. Compare to the sam/alex/paa setup above.
scifi/noir/__init__.py
from flask import Flask

hitchcock = Flask(__name__)
hitchcock.config.from_object('config')
scifi/noir/views.py
from noir import hitchcock

@hitchcock.route('/')
def slash():
    title = f"<title>Confusion Cinema</title>"
    greeting = f'<span style="color: blue"><b>Some like hot, psycho birds!</b></span>\n'
    return f'{title}\n{greeting}\n'
scifi/run_fi.py
from noir import hitchcock
from noir import views

if __name__ == '__main__':
    hitchcock.run()
scifi/config.py
SECRET_KEY = 'ivegotasecret'
scifi/uwsgi.ini
[uwsgi]
module=run_fi:hitchcock

master = true
processes = 5

socket = marilyn.sock
chmod-socket = 666
vacuum = true

uid = joe
gid = www-data

die-on-term = true
We distinguish the two separate Flask applications by serving them via separate ports. The uWSGI servers take care of linking the Flask applications to the correct ports for the Nginx server.
server {
   listen 8181;
   server_name localhost;

    location / {
        include   uwsgi_params;
        uwsgi_pass  unix:/home/joe/alex/baker.sock;
    }
}

server {
    listen 8080;
    server_name localhost;

    location / {
        include   uwsgi_params;
        uwsgi_pass  unix:/home/joe/scifi/marilyn.sock;
    }
}
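To start the second application, activate the western virtual environment and run uWSGI from the scifi project directory the same way as before (the pid-file name here is an assumption):

(western) joe@helium:~/scifi$ uwsgi --ini uwsgi.ini --daemonize uwsgi.log --safe-pidfile /tmp/scifi.pid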

Wednesday 2 October 2019

Integrate Dash in a Flask application

create a virtual environment

cd dashapps
python -m venv venv_dash
source venv_dash/bin/activate

install flask and dash in the virtual environment

pip install flask
pip install dash
pip install dash-daq

leave the virtual environment

deactivate


Integrate Dash in Flask

(https://hackersandslackers.com/gaining-full-control-over-plotly-dash/)

simple directory structure


dashapps(dir)
    -- mydashapp(dir)
             -- __init__.py
             -- routes.py
             -- dash_application (dir)
                    --exampledash.py
    -- wsgi.py

wsgi.py

from mydashapp import create_app
app = create_app()
if __name__ == "__main__":
    app.run(host='0.0.0.0', debug=True)

__init__.py

"""Initialize app."""
from flask import Flask

def create_app():
    """Construct the core application."""
    app = Flask(__name__,  instance_relative_config=False)
    #app.config.from_object('config.Config')

    with app.app_context():
        # Import main Blueprint
        #from . import routes
        from mydashapp import routes
        # Import Dash application
        from .dash_application import exampledash
        app = exampledash.Add_Dash(app)
        return app

Explanation:
it comes down to these 2 lines:
# Import Dash application
from .dash_application import exampledash
app = exampledash.Add_Dash(app)
exampledash is actually a Python file ( ./dash_application/exampledash.py ) which contains our Dash app! Dash typically likes to have a single .py file per view, which turns out to work great for us. Let's look at why this works by checking exampledash.py:




routes.py

import os
from flask import Blueprint, render_template
from flask import current_app as app


@app.route('/')
@app.route('/index')
def index():
    return "Hello, World Lets make a party!"

exampledash.py


from dash import Dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd


def Add_Dash(server):
    """Create Dash app."""
    external_stylesheets = ['/static/dist/css/style.css']
    external_scripts = ['/static/dist/js/includes/jquery.min.js',
                    '/static/dist/js/main.js']
                   
    dash_app = Dash(server=server,
                    external_stylesheets=external_stylesheets,
                    external_scripts=external_scripts,
                    routes_pathname_prefix='/commands/')

    # Create Dash Layout
    dash_app.layout = html.Div(children=[
    html.H1(children='Hello Dash'),

    html.Div(children='''
        Dash: A web application framework for Python.
    '''),

    dcc.Graph(
        id='example-graph',
        figure={
            'data': [
                {'x': [1, 2, 3], 'y': [4, 1, 2], 'type': 'bar', 'name': 'SF'},
                {'x': [1, 2, 3], 'y': [2, 4, 5], 'type': 'bar', 'name': u'Montréal'},
            ],
            'layout': {
                'title': 'Dash Data Visualization'
            }
        }
    )
])

    return dash_app.server

Explanation.
It comes down to the following code:
dash_app = Dash(server=server,
                external_stylesheets=external_stylesheets,
                external_scripts=external_scripts,
                routes_pathname_prefix='/commands/')


We pass our Flask instance to Add_Dash as a parameter called server. Unlike the previous examples,
it's actually Flask running the show this time, with Dash piggybacking as a module.
Instead of creating our dash_app object as a global variable (as is suggested), we stuck it in a
function called Add_Dash(). This allows us to pass our top-level Flask app into Dash as server,
hence dash_app = Dash(server=server).
This effectively spins up a Dash instance using our Flask app at its core, as opposed
to its own!
 



start the Flask app


python wsgi.py
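With the settings above, Flask serves on port 5000 (the default) and the Dash app is reachable under http://localhost:5000/commands/, per routes_pathname_prefix='/commands/'.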



Part 2: Multiple dashboards in one Flask app


you do this as follows:


__init__.py



"""Initialize app."""
from flask import Flask


def create_app():
    """Construct the core application."""
    server = Flask(__name__,
                instance_relative_config=False)
    #app.config.from_object('config.Config')

    with server.app_context():
        from mydashapp2 import routes
        from .dash_application import mydash1
        app = mydash1.Add_Dash(server)
        from .dash_application import mydash2
        app = mydash2.Add_Dash(server)
        return app



Your two dashboards, mydash1.py and mydash2.py, live in the subdirectory dash_application

mydash1.py


from dash import Dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd


def Add_Dash(server):
    """Create Dash app."""
    external_stylesheets = ['/static/dist/css/style.css']
    external_scripts = ['/static/dist/js/includes/jquery.min.js',
                    '/static/dist/js/main.js']
                   
    dash_app = Dash(server=server,
                    external_stylesheets=external_stylesheets,
                    external_scripts=external_scripts,
                    url_base_pathname=f'/mydash1/')

    # Create Dash Layout
    dash_app.layout = html.Div(children=[
    html.H1(children='Hello Dash 1'),

    html.Div(children='''
        Dash: Dashboard 1.
    '''),
])

    return dash_app.server



mydash2.py


from dash import Dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd


def Add_Dash(server):
    """Create Dash app."""
    external_stylesheets = ['/static/dist/css/style.css']
    external_scripts = ['/static/dist/js/includes/jquery.min.js',
                    '/static/dist/js/main.js']
                   
    dash_app = Dash(server=server,
                    external_stylesheets=external_stylesheets,
                    external_scripts=external_scripts,
                    url_base_pathname=f'/mydash2/')

    # Create Dash Layout
    dash_app.layout = html.Div(children=[
    html.H1(children='Hello Dash 2'),

    html.Div(children='''
        Dash: Dashboard 2.
    '''),
])

    return dash_app.server

Monday 30 September 2019

matplotlib

https://matplotlib.org/3.1.1/tutorials/introductory/pyplot.html

Why f, ax = plt.subplots()

 
f, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')


plt.subplots() is a function that returns a tuple containing a figure and axes object(s). Thus when using fig, ax = plt.subplots() you unpack this tuple into the variables fig and ax.
 Having fig is useful if you want to change figure-level attributes or save the figure as an image file later (e.g. with fig.savefig('yourfilename.png')). You certainly don't have to use the returned figure object but many people do use it later so it's common to see.
Also, all axes objects (the objects that have plotting methods), have a parent figure object anyway, thus:

fig, ax = plt.subplots()

is more concise than this:
 
fig = plt.figure()
ax = fig.add_subplot(111)
 
 
 
plt.plot([1, 2, 3, 4], [1, 4, 9, 16]) 
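Putting the pieces above together, a minimal runnable sketch (the data values are arbitrary):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([1, 2, 3, 4], [1, 4, 9, 16])
ax.set_title('Simple plot')
fig.savefig('simple_plot.png')   # figure-level operation; this is why keeping fig around is useful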


Plot GeoDataFrame color


Friday 13 September 2019

Geomapping Folium

# -*- coding: utf-8 -*-
"""
Created on Thu Aug 29 09:42:55 2019

@author: wagenerj
"""
import pandas as pd
import folium
import geojson
from shapely.geometry import shape


# path to the GeoJSON file with the Amsterdam areas
shapefile2 = 'C://Users//wagenerj//Documents//data//amsterdamgeo//GEBIEDEN22.json'

# =============================================================================
# https://github.com/python-visualization/folium/blob/master/examples/GeoJSON_and_choropleth.ipynb
# =============================================================================

#read in the GeoJSON file
with open(shapefile2) as f:
    geo_json_data = geojson.load(f)



# =============================================================================
# Try to understand the structure of the GeoJSON file
#
# GEOJSON ___type (string)
#         ___features (list)
#
# features is a list of Feature objects
#
# a Feature is a collection with the following keys
# geometry   (geometry.Polygon)
# id         (int)
# properties (dict)
# type       (str)
#
# Can be inspected via e.g.
# for key, value in geo_json_data.features[2].items() :
#     print (key)
#
# It is important to know what the properties of the feature are; they can differ slightly each time.
# You can look them up with e.g.
# geo_json_data.features[2]['properties']
#
#
# for feature in geo_json_data['features']:
#     print(feature['properties'])
#
# geo_json_data.features[2].properties
# geo_json_data.features[2].properties['Gebied']
# geo_json_data.features[2].properties['Gebied_code']
# =============================================================================


for feature in geo_json_data['features']:
     print(feature['id'])
#    print(feature['properties']['Gebied_code'])
     feature['properties']['centroid_x']=shape(feature['geometry']).centroid.x
     feature['properties']['centroid_y']=shape(feature['geometry']).centroid.y
     print(shape(feature['geometry']).centroid)
# Essential: here we make the feature ID equal to Gebied_code, because that is what gets matched on
#    feature['id']=feature['properties']['Gebied_code']




m = folium.Map(location=[52.37, 4.90], tiles='Stamen Terrain', zoom_start=12)
#simple
folium.GeoJson(geo_json_data).add_to(m)
m.save("C://Users//wagenerj//Documents//data//amsterdamgeo//m.html" )


m = folium.Map(location=[52.37, 4.90], tiles='Stamen Terrain', zoom_start=12)
folium.GeoJson(
    geo_json_data,
    style_function=lambda feature: {
       'fillColor': 'green' if 'e' in feature['properties']['Gebied'].lower() else '#ffff00',
        'color': 'black',
        'weight': 2,
        'dashArray': '5, 5'
    }
).add_to(m)
m.save("C://Users//wagenerj//Documents//data//amsterdamgeo//m.html" )



# we need to compute a color for each area
gbdata = pd.read_csv('C://Users//wagenerj//Documents//data//amsterdamgeo//datapergebied.csv',sep=';')
gbdata.columns =gbdata.columns.str.strip()


#create a colormap based on the range of the values
from branca.colormap import linear
colormap = linear.YlGn_09.scale(gbdata['aantal'].min(),gbdata['aantal'].max())
print(colormap(5.0))
colormap
print(colormap)

#convert the table into a dictionary, so we can link a feature to the value of its area
gbdata_dict= gbdata.set_index('Gebied_code')['aantal']


#The essence here is that fillColor passes the right key to gbdata_dict: this must be the area code of the Feature, feature['properties']['Gebied_code']
#WORKING: add popup to polygon shapes in choropleth
#https://github.com/python-visualization/folium/issues/1020
m = folium.Map(location=[52.37, 4.90], tiles='Stamen Terrain', zoom_start=12)
gj=folium.GeoJson(
    geo_json_data,
    name='unemployment',
    tooltip=folium.GeoJsonTooltip(fields=['centroid_x']),
    style_function=lambda feature: {
        'fillColor': colormap(gbdata_dict[feature['properties']['Gebied_code']]),
        'color': 'black',
        'weight': 1,
        'dashArray': '5, 5',
        'fillOpacity': 0.9,})
#gj.add_child(folium.Popup('outline Popup on GeoJSON'))
gj.add_to(m)
m.save("C://Users//wagenerj//Documents//data//amsterdamgeo//m.html" )


#https://github.com/python-visualization/folium/issues/1020
m = folium.Map(location=[52.37, 4.90], tiles='Stamen Terrain', zoom_start=12)
gj=folium.GeoJson(
    geo_json_data,
    name='unemployment',
    tooltip=folium.GeoJsonTooltip(fields=['Gebied']),
    style_function=lambda feature: {
        'fillColor': colormap(gbdata_dict[feature['properties']['Gebied_code']]),
        'color': 'black',
        'weight': 1,
        'dashArray': '5, 5',
        'fillOpacity': 0.9,})
#gj.add_child(folium.Popup('outline Popup on GeoJSON'))
gj.add_to(m)
m.save("C://Users//wagenerj//Documents//data//amsterdamgeo//m.html" )


# Note: make sure X and Y are in the right order; folium expects location=[lat, lon], i.e. [centroid_y, centroid_x].
for feature in geo_json_data['features']:
    folium.Marker(
        location=[feature['properties']['centroid_y'], feature['properties']['centroid_x']],
        popup=folium.Popup("Let's try quotes", parse_html=True, max_width=100)
    ).add_to(m)

m.save("C://Users//wagenerj//Documents//data//amsterdamgeo//m.html" )


>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
There is a work-around for this. You need to iterate over each geoJson feature and create a new geojson for each one.
Then, add a popup for each geoJson feature. Then combine all features in a layer. In the code below, the full geoJson is geo_json_data.

m = folium.Map(location=[52.37, 4.90], tiles='Stamen Terrain', zoom_start=12)
list_tooltip_vars=['Gebied_code']

layer_geom = folium.FeatureGroup(name='layer',control=False)

for i in range(len(geo_json_data["features"])):
    temp_geojson = {"features":[geo_json_data["features"][i]],"type":"FeatureCollection"}
    temp_geojson_layer = folium.GeoJson(temp_geojson,
                   highlight_function=lambda x: {'weight':3, 'color':'black'},
                    control=False,
                    style_function=lambda feature: {
                   'color': 'black',
                   'weight': 1},
                    tooltip=folium.features.GeoJsonTooltip(fields=list_tooltip_vars,
                                        aliases=[x.capitalize()+":" for x in list_tooltip_vars],
                                          labels=True,
                                          sticky=False))
#    folium.Popup(temp_geojson["features"][0]["properties"]["Gebied"]).add_to(temp_geojson_layer)
    temp_geojson_layer.add_to(layer_geom)

layer_geom.add_to(m)
folium.LayerControl(autoZIndex=False, collapsed=True).add_to(m)
m.save("C://Users//wagenerj//Documents//data//amsterdamgeo//m.html" )





# =============================================================================
# # Using the Choropleth class
# # Now if you want to go faster, you can use the Choropleth class. Have a look at its docstring, it has several styling options.
# # You can also use it by providing a file name (geo_path):
# # =============================================================================
m = folium.Map(location=[52.37, 4.90], tiles='Stamen Terrain', zoom_start=12)
folium.Choropleth(geo_data=geo_json_data).add_to(m)
m.save("C://Users//wagenerj//Documents//data//amsterdamgeo//m.html" )


# working folium.Choropleth
m = folium.Map(location=[52.37, 4.90], tiles='Stamen Terrain', zoom_start=12)
folium.Choropleth(
    geo_data=geo_json_data,
    data=gbdata,
    columns=['Gebied_code', 'aantal'],
    key_on='feature.properties.Gebied_code',
    fill_color='YlGn',
    fill_opacity=0.7,
    line_opacity=0.2,
    legend_name='Gebieden  (%)',
#    highlight=True,
    tooltip=folium.GeoJsonTooltip(fields=['Gebied'])
).add_to(m)
m.save("C://Users//wagenerj//Documents//data//amsterdamgeo//m.html" )




Wednesday 4 September 2019

Converting data types

convert an amount to numeric


When reading data from a CSV you often get an amount in Dutch notation, i.e. with a decimal comma. Such an amount is not recognized as a number and is read in as dtype string; you cannot simply run pd.to_numeric on it.

How do you turn this string into an amount?


first replace the comma with a period using str.replace, then convert


dfcombi['bedrag_ZENG']=round(pd.to_numeric(dfcombi['BEDRAG_2018'].str.replace(',', '.')),0)
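Note that this only handles the decimal comma. If the source also uses a period as thousands separator, strip that first; a minimal sketch with made-up values:

import pandas as pd

s = pd.Series(['12,50', '1.200,25'])
bedrag = pd.to_numeric(s.str.replace('.', '', regex=False).str.replace(',', '.', regex=False))
# 0      12.50
# 1    1200.25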


 

 

Convert floating-point numbers to integers in a dataframe

first select all float columns in the dataframe and optionally drop a column; then change the datatype collectively

df_float_col = dfData.select_dtypes(include = ['float64'])
df_float_col=df_float_col.drop(columns=['bedrag'])
print(df_float_col.columns)
for col in df_float_col.columns.values:
    dfData[col]=dfData[col].fillna(0.0).astype('int64')
dfData
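An alternative, assuming pandas 0.24 or newer, is the nullable integer dtype, which keeps missing values instead of forcing them to 0 (the float-to-Int64 cast behaves slightly differently across pandas versions):

for col in df_float_col.columns.values:
    dfData[col] = dfData[col].astype('Int64')   # nullable integer; NaN stays missing instead of becoming 0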



Wednesday 17 July 2019

SQLALCHEMY: putting a dynamic dataframe into a table without defining a class (ORACLE 12)

#  How do I get a large dataset into a table quickly
#  Oracle has its own quirks with regard to datatypes etc.

from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import *

from sqlalchemy import Table, MetaData, Column, Integer, String, ForeignKey
from sqlalchemy.orm import mapper
from sqlalchemy.dialects.oracle import VARCHAR2

sqllite_DB='sqlite:///C:\\Users\\wagene002\\Documents\\Python\\howto\\DB_ZenG.db'
engine = create_engine(sqllite_DB,echo=False)
Base = declarative_base()

import cx_Oracle

#  method 2: with service name
oracle_connection_string = ('oracle+cx_oracle://DM:*****@(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=***)(PORT=1521)))(CONNECT_DATA=(SERVICE_NAME=********)(SERVER=DEDICATED)))')
engine = create_engine(oracle_connection_string)
Base = declarative_base()



#==>  Create a Pandas dataset
import pandas as pd
df=pd.read_csv('C:\\Users\wagene002\Documents\Python\howto\levering1.csv')
df1=df[[ 'valid_bsn','bsn', 'code_voorziening', 'jaar', 'bedrag']]


# Oracle-specific: to_sql turns character columns into CLOBs; solve this via dtype

# method 1: create the types manually
dict_types={'bsn': VARCHAR2(128), 'code_voorziening': VARCHAR2(100)}
# method 2: automatically build a dict with all fields as varchar
dict_types={}
for i in df1.columns:
    dict_types[i]= VARCHAR2(150)

#create a table from a stripped-down dataset (2 records)
dfDef=df1.iloc[0:2,]
dfDef.to_sql(name='LeveringenSociaal',con=engine, index=False,if_exists="replace" ,dtype=dict_types)

# ORACLE TRICK: turn all fields of the dataset being written into string values
# without this step, writing to Oracle fails with TypeError: expecting string or bytes object
df8=df1.astype(str)


#===> Map a database table to a class object cLev
class cLev(object):
    pass

metadata=MetaData(engine)
tblLeveringen=Table('LeveringenSociaal', metadata,Column("id", Integer, primary_key=True) ,autoload=True)
engine.execute(tblLeveringen.delete())
mapper(cLev,tblLeveringen)


# Now insert the full dataset into the table
Session = sessionmaker(bind=engine)
session = Session()
session.bulk_insert_mappings(cLev, df8.to_dict(orient="records"))
session.commit()
session.close()

Friday 12 July 2019

SQLALCHEMY: links

https://auth0.com/blog/sqlalchemy-orm-tutorial-for-python-developers/

http://www.blog.pythonlibrary.org/2010/09/10/sqlalchemy-connecting-to-pre-existing-databases/

https://www.freecodecamp.org/news/sqlalchemy-makes-etl-magically-easy-ab2bd0df928/

https://www.codementor.io/bruce3557/graceful-data-ingestion-with-sqlalchemy-and-pandas-pft7ddcy6


https://sdsawtelle.github.io/blog/output/large-data-files-pandas-sqlite.html

SQLALCHEMY: creating an Oracle table

from sqlalchemy import *
from sqlalchemy import create_engine, ForeignKey
from sqlalchemy import Column, Date, Integer, String

from sqlalchemy.dialects.oracle import VARCHAR2
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref
import cx_Oracle

#  method 2: with service name
oracle_connection_string = ('oracle+cx_oracle://DM:######@(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=******.basis.lan)(PORT=1521)))(CONNECT_DATA=(SERVICE_NAME=********)(SERVER=DEDICATED)))')

engine = create_engine(oracle_connection_string)
# engine = create_engine('sqlite:///student.db', echo=True)
Base = declarative_base()

########################################################################
class Student(Base):
    """"""
    __tablename__ = "student"

    id = Column(Integer, primary_key=True)
    username = Column(VARCHAR2(255))
  

    #----------------------------------------------------------------------
    def __init__(self, username, firstname, lastname, university):
        """"""
        self.username = username
       

# create tables
Base.metadata.create_all(engine)

SQLALCHEMY: how do you fill a table from a static dataframe via an ORM class

from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import *


sqllite_DB='sqlite:///C:\\Users\\wagene002\\Documents\\Python\\howto\\DB_ZenG.db'
engine = create_engine(sqllite_DB)
Base = declarative_base()

class LeveringenSociaal(Base):
    __tablename__ = "LeveringenSociaal"
    Index = Column(Integer, primary_key=True)
    valid_bsn = Column(String)
    bsn = Column(String)
    code_voorziening = Column(String)
    jaar = Column(String)
    bedrag = Column(String)

LeveringenSociaal.__table__.create(bind=engine, checkfirst=True)



==>
import pandas as pd
df=pd.read_csv('C:\\Users\wagene002\Documents\Python\howto\levering1.csv')

df1=df[['valid_bsn','bsn', 'code_voorziening', 'jaar', 'bedrag']]



===> method 1: not so fast; insert per record
leveringensociaal=[]

for index,row in df1.iterrows():
    leveringensociaal.append(row)
  
Session = sessionmaker(bind=engine)
session = Session()

for lever in leveringensociaal:
    row = LeveringenSociaal(**lever)
    session.add(row)
  

session.commit()


===>  method 2: very fast; via the bulk loader
Session = sessionmaker(bind=engine)
session = Session()
session.bulk_insert_mappings(LeveringenSociaal, df1.to_dict(orient="records"))
session.commit()
session.close()

SQLALCHEMY: putting a dynamic dataframe into a table without defining a class (SQLITE)

#  How do I get a large dataset into a table quickly
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import *

from sqlalchemy import Table, MetaData, Column, Integer, String, ForeignKey
from sqlalchemy.orm import mapper

sqllite_DB='sqlite:///C:\\Users\\wagene002\\Documents\\Python\\howto\\DB_ZenG.db'
engine = create_engine(sqllite_DB,echo=False)
Base = declarative_base()

#==>  Create a Pandas dataset

import pandas as pd
df=pd.read_csv('C:\\Users\wagene002\Documents\Python\howto\levering1.csv')
df1=df[[ 'valid_bsn','bsn', 'code_voorziening', 'jaar', 'bedrag']]



#create a table from a stripped-down dataset (2 records)
dfDef=df1.iloc[0:2,]
dfDef.to_sql(name='LeveringenSociaal',con=engine, index=False,if_exists="replace" )


#===> Map a database table to a class object cLev


class cLev(object):
    pass

metadata=MetaData(engine)
tblLeveringen=Table('LeveringenSociaal', metadata,Column("id", Integer, primary_key=True) ,autoload=True)
engine.execute(tblLeveringen.delete())
mapper(cLev,tblLeveringen)



# Now insert the full dataset into the table

Session = sessionmaker(bind=engine)
session = Session()
session.bulk_insert_mappings(cLev, df1.to_dict(orient="records"))
session.commit()
session.close()

Tuesday 7 May 2019

Password hash Security

Werkzeug is a package that provides password hashing


>>> from werkzeug.security import generate_password_hash
>>> hash = generate_password_hash('foobar')
>>> hash
'pbkdf2:sha256:50000$vT9fkZM8$04dfa35c6476acf7e788a1b5b3c35e217c78dc04539d295f011f01f18cd2175f'



Verification process


>>> from werkzeug.security import check_password_hash
>>> check_password_hash(hash, 'foobar')
True
>>> check_password_hash(hash, 'barfoo')
False
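In a Flask application this pair is typically wrapped in the user model; a minimal sketch, not tied to any ORM:

from werkzeug.security import generate_password_hash, check_password_hash

class User:
    def set_password(self, password):
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        return check_password_hash(self.password_hash, password)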



multiple hash

Werkzeug generate_password_hash("same password") generates different output each time when I run it multiple times

The password is salted, yes. The salt is added to the password before hashing, to ensure that the hash isn't useable in a rainbow table attack.

Because the salt is randomly generated each time you call the function, the resulting password hash is also different. The returned hash includes the generated salt so that check_password_hash can still correctly verify the password.

Demo:

>>> from werkzeug.security import generate_password_hash
>>> generate_password_hash('foobar')
'pbkdf2:sha1:1000$tYqN0VeL$2ee2568465fa30c1e6680196f8bb9eb0d2ca072d'
>>> generate_password_hash('foobar')
'pbkdf2:sha1:1000$XHj5nlLU$bb9a81bc54e7d6e11d9ab212cd143e768ea6225d'

These two strings differ; but contain enough information to verify the password because the generated salt is included in each:

# pbkdf2:sha1:1000$tYqN0VeL$2ee2568465fa30c1e6680196f8bb9eb0d2ca072d
  ^^^^^^^^^^^^^^^^   salt   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
      algo info    ^^^^^^^^        actual hash of the password
  (PBKDF2 applied SHA1 1000 times)


Because the random salt is tYqN0VeL for one and XHj5nlLU for the other, the resulting hash is also different.

The foobar password can still be verified against either hash:

>>> from werkzeug.security import check_password_hash
>>> check_password_hash('pbkdf2:sha1:1000$tYqN0VeL$2ee2568465fa30c1e6680196f8bb9eb0d2ca072d', 'foobar')
True
>>> check_password_hash('pbkdf2:sha1:1000$XHj5nlLU$bb9a81bc54e7d6e11d9ab212cd143e768ea6225d', 'foobar')
True 








Determining dates from a start and end date in a dataframe

Example on separate fields  ####################################################################### # import necessary packages from datetime...