AgGrid: FilterModel default to OR operator

I was wondering if it would be possible to set the default filtering operator in AgGrid to use the OR operator:

Taking the example here, can we have the filter:

athlete == "michael phelps" OR country == "Russia"

import dash_ag_grid as dag
from dash import Dash, Input, Output, dcc, html, callback
import pandas as pd
import os



app = Dash(__name__)


# Olympic winners sample dataset hosted by Plotly.
df = pd.read_csv(
    "https://raw.githubusercontent.com/plotly/datasets/master/ag-grid/olympic-winners.csv"
)
# Expose the positional index as a column so the grid can use it as a stable row id.
df["index"] = df.index



# Column definitions. "agSetColumnFilter" is an AG Grid Enterprise feature;
# the "age" number filter is restricted to a single condition with three
# comparison options.
columnDefs = [
    {"field": "athlete", "filter": "agSetColumnFilter"},
    {
        "field": "age",
        "filter": "agNumberColumnFilter",
        "filterParams": {
            "filterOptions": ["equals", "lessThan", "greaterThan"],
            "maxNumConditions": 1,
        },
    },
    {
        "field": "country",
        # Set-filter values must be provided explicitly for the infinite row model.
        "filter": "agSetColumnFilter",
        "filterParams": {"values": df.country.unique()},
    },
    {
        "field": "year",
        "filter": "agSetColumnFilter",
        "filterParams": {"values": ["2000", "2004", "2008", "2012"]},
    },
    # NOTE(review): "athlete" is defined twice, which renders two athlete
    # columns — confirm the duplicate is intentional.
    {"field": "athlete"},
    {"field": "date"},
    {"field": "sport", "suppressMenu": True},
    {"field": "total", "suppressMenu": True},
]

# Options applied to every column: flexible sizing plus an always-visible
# floating filter row beneath the headers.
defaultColDef = {
    "flex": 1,
    "minWidth": 150,
    "floatingFilter": True,
}


app.layout = html.Div(
    [
        dcc.Markdown("Infinite scroll with sort and filter"),
        dag.AgGrid(
            id="infinite-sort-set-filter-grid",
            columnDefs=columnDefs,
            defaultColDef=defaultColDef,
            # Rows are fetched lazily in blocks via getRowsRequest/getRowsResponse.
            rowModelType="infinite",
            # Set filter is an AG Grid Enterprise feature.
            # A license key should be provided if it is used.
            # License keys can be passed to the `licenseKey` argument of dag.AgGrid
            enableEnterpriseModules=True,
            # NOTE(review): raises KeyError at import time if AGGRID_ENTERPRISE
            # is unset — consider os.environ.get(...) if that should be tolerated.
            licenseKey=os.environ['AGGRID_ENTERPRISE'],
            dashGridOptions={
                # The number of rows rendered outside the viewable area the grid renders.
                "rowBuffer": 0,
                # How many blocks to keep in the store. Default is no limit, so every requested block is kept.
                "maxBlocksInCache": 2,
                "cacheBlockSize": 100,
                "cacheOverflowSize": 2,
                "maxConcurrentDatasourceRequests": 2,
                "infiniteInitialRowCount": 1,
                "rowSelection": "multiple",
                "animateRows": False
            },
            # Stable per-row id (the "index" column added above) so selection
            # survives cache-block reloads.
            getRowId="params.data.index"
        ),
    ],
)

# Map AG Grid comparison filter "type" values to the matching pandas
# Series comparison method names (e.g. "equals" -> Series.eq), used by
# filter_df for the simple relational filter types.
operators = {
    "greaterThanOrEqual": "ge",
    "lessThanOrEqual": "le",
    "lessThan": "lt",
    "greaterThan": "gt",
    "notEqual": "ne",
    "equals": "eq",
}


def filter_df(dff, filter_model, col):
    """Apply one AG Grid filter-model condition to ``dff[col]``.

    Supports text/number models (``filter``/``filterTo``), date models
    (``dateFrom``/``dateTo``) and set models (``values``).

    :param dff: DataFrame to filter.
    :param filter_model: a single AG Grid filter condition (not a combined
        two-condition model — the caller splits those).
    :param col: column name the condition applies to.
    :return: the filtered DataFrame.
    """
    crit1 = crit2 = None
    if filter_model["filterType"] == "date":
        # Date filter models carry dateFrom/dateTo and have no "filter" key,
        # so they must be parsed outside the "filter" guard below.
        if filter_model.get("dateFrom"):
            crit1 = pd.Series(filter_model["dateFrom"]).astype(dff[col].dtype)[0]
        if filter_model.get("dateTo"):
            crit2 = pd.Series(filter_model["dateTo"]).astype(dff[col].dtype)[0]
    elif "filter" in filter_model:
        # Coerce criteria to the column dtype so comparisons are type-safe.
        crit1 = pd.Series(filter_model["filter"]).astype(dff[col].dtype)[0]
        if "filterTo" in filter_model:
            crit2 = pd.Series(filter_model["filterTo"]).astype(dff[col].dtype)[0]

    if "type" in filter_model:
        ftype = filter_model["type"]
        if ftype == "contains":
            # AG Grid "contains" is a literal substring match, not a regex.
            dff = dff.loc[dff[col].str.contains(crit1, regex=False)]
        elif ftype == "notContains":
            dff = dff.loc[~dff[col].str.contains(crit1, regex=False)]
        elif ftype == "startsWith":
            dff = dff.loc[dff[col].str.startswith(crit1)]
        elif ftype == "notStartsWith":
            dff = dff.loc[~dff[col].str.startswith(crit1)]
        elif ftype == "endsWith":
            dff = dff.loc[dff[col].str.endswith(crit1)]
        elif ftype == "notEndsWith":
            dff = dff.loc[~dff[col].str.endswith(crit1)]
        elif ftype == "inRange":
            if filter_model["filterType"] == "date":
                # A date range is a value comparison; between_time would
                # filter by time-of-day and requires a DatetimeIndex.
                dff = dff.loc[
                    dff[col]
                    .astype("datetime64[ns]")
                    .between(pd.to_datetime(crit1), pd.to_datetime(crit2))
                ]
            else:
                dff = dff.loc[dff[col].between(crit1, crit2)]
        elif ftype == "blank":
            dff = dff.loc[dff[col].isnull()]
        elif ftype == "notBlank":
            dff = dff.loc[~dff[col].isnull()]
        else:
            # Relational types (equals, lessThan, ...) dispatch through the
            # module-level operators mapping to the pandas comparison method.
            dff = dff.loc[getattr(dff[col], operators[ftype])(crit1)]
    elif filter_model["filterType"] == "set":
        # Set filter sends the selected values as strings.
        dff = dff.loc[dff[col].astype("string").isin(filter_model["values"])]
    return dff


@callback(
    Output("infinite-sort-set-filter-grid", "getRowsResponse"),
    Input("infinite-sort-set-filter-grid", "getRowsRequest"),
)
def infinite_scroll(request):
    """Serve one block of rows for the infinite row model.

    Applies the grid's filter model (including two-condition AND/OR
    combinations), then its sort model, and returns the requested slice
    plus the total filtered row count.

    :param request: the grid's getRowsRequest dict (or None before the
        grid has made a request).
    :return: dict with "rowData" (list of records) and "rowCount".
    """
    dff = df.copy()

    if request:
        if request["filterModel"]:
            for col, model in request["filterModel"].items():
                # Best-effort: a malformed condition should not take down
                # the callback, so failures leave dff unchanged.
                try:
                    if "operator" in model:
                        if model["operator"] == "AND":
                            # AND: apply the second condition to the result
                            # of the first.
                            dff = filter_df(dff, model["condition1"], col)
                            dff = filter_df(dff, model["condition2"], col)
                        else:
                            # OR: filter independently and union the results.
                            dff1 = filter_df(dff, model["condition1"], col)
                            dff2 = filter_df(dff, model["condition2"], col)
                            dff = pd.concat([dff1, dff2])
                    else:
                        dff = filter_df(dff, model, col)
                except Exception:
                    # Intentionally broad (but not bare): skip this condition.
                    pass

        if request["sortModel"]:
            columns = [s["colId"] for s in request["sortModel"]]
            ascending = [s["sort"] == "asc" for s in request["sortModel"]]
            dff = dff.sort_values(by=columns, ascending=ascending)

        # Report at least 1 row so the grid keeps requesting blocks.
        row_count = max(len(dff.index), 1)

        partial = dff.iloc[request["startRow"] : request["endRow"]]
        return {"rowData": partial.to_dict("records"), "rowCount": row_count}


if __name__ == "__main__":
    # NOTE(review): run_server is the Dash 2.x entry point; newer Dash
    # releases prefer app.run — confirm against the installed version.
    app.run_server(debug=True)

Since you are using Enterprise:

2 Likes

ah, love it :grin: I should check these blogs more often :blush: