class_performance_assessor.py

Details about cProfile output

| Value   | Description |
| ------- | ----------- |
| ncalls  | The number of calls made. |
| tottime | The total time spent in the given function. Time spent in calls to sub-functions is excluded. |
| percall | The quotient of tottime divided by ncalls. |
| cumtime | Like tottime, but includes the time spent in all called sub-functions. |
| percall | The quotient of cumtime divided by primitive calls. Primitive calls are all calls that were not induced via recursion. |

Information was collected here: https://www.machinelearningplus.com/python/cprofile-how-to-profile-your-python-code/
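
To make the table concrete, here is a minimal, self-contained sketch (independent of this project) that produces a report with exactly these columns:

```py
import cProfile
import pstats


def work() -> int:
    """A dummy workload to profile."""
    return sum(i * i for i in range(100_000))


profiler = cProfile.Profile()
profiler.enable()
work()
profiler.disable()

# Print the table with the ncalls, tottime, percall, cumtime and
# percall columns described above.
pstats.Stats(profiler).strip_dirs().print_stats()
```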

An object to compute time and memory consumption.

PerformanceAssessor

A class to assess the performance (memory or time) of a given function.

Source code in src/perfassess/class_performance_assessor.py
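Note that the listing below starts inside the module, so the file header is not shown. For reference, these are the imports the class appears to rely on (inferred from the code; the actual header may differ):

```py
import sys
import tracemalloc
from cProfile import Profile
from io import StringIO
from os.path import exists, isdir
from pstats import Stats
from typing import Callable

import numpy as np
import plotly.graph_objects as go
```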
class PerformanceAssessor:
    """A class to access the performance of a given function (memory or time).
    """

    def __init__(
        self,
        main: Callable,
        n_field: int = 0,
        **kwargs
    ):
        """Initialize a PerformanceAssessor object.

        Parameters
        ----------
        main : `Callable`
            The function to assess.

        n_field : `int`, optional
            The number of fields to keep in the function name. By default 0,
            which keeps the full path.

            Let us say that we have a function named like this:

            ```sh
            /home/user/Document/some_function.py:000(function_name)
            ```

            If we input 2, we will have something like this:

            ```sh
            Document/some_function.py:000(function_name)
            ```

            We split at each "/" and keep the last two fields.

        kwargs
            The keyword arguments to pass to the function under test.

        Example
        -------
        ```py
        assessor: PerformanceAssessor = PerformanceAssessor(
            main=function_to_test,
            n_field=1,
            argument_for_function_to_test="some_value"
        )
        ```
        """
        self.__assessed_function: Callable = main
        self.__function_argument: dict = dict(kwargs)
        self.__data: dict = {}
        self.__plot: "dict[str, go.Figure]" = {}
        self.__n_field: int = n_field

    def launch_profiling(
        self,
        do_memory: bool = True,
        do_time: bool = True
    ):
        """Launch the evaluation of performance (memory or time).

        Parameters
        ----------
        do_memory : `bool`, optional
            Do the memory evaluation. By default True.

        do_time : `bool`, optional
            Do the time evaluation. By default True.

        Raises
        ------
        `ValueError`
            When both `do_memory` and `do_time` are set to `False`.
        """
        if not do_memory and not do_time:
            raise ValueError("[Err##] One value between \"do_memory\" or "
                             "\"do_time\" have to set to `True`.")

        if do_memory:
            # Starting to check memory usage.
            tracemalloc.start()

        if do_time:
            profile: Profile = Profile()
            # Starting to check time usage.
            profile.enable()

        # Launch the function to test.
        self.__assessed_function(**self.__function_argument)

        if do_memory:
            # Take a snapshot of memory usage.
            snapshot = tracemalloc.take_snapshot()

            # Create the plot for evaluating memory usage.
            self.__memory_evaluation(stat_memory=snapshot.statistics("lineno"))

            # Stop checking memory usage.
            tracemalloc.stop()

        if do_time:
            # Stop checking time usage.
            profile.disable()

            # Create the plot for evaluating time usage.
            self.__time_evaluation(profile=profile)

    def __memory_evaluation(
        self,
        stat_memory: "list[tracemalloc.Statistic]"
    ):
        """Parse the memory evaluation output and set up a plot.

        Parameters
        ----------
        stat_memory : `list[tracemalloc.Statistic]`
            The memory statistics taken from a tracemalloc snapshot.
        """
        stat_dict = {}

        # Setting the dataset for memory usage.
        for stat in stat_memory:
            key: str = str(stat).split()[0]

            if self.__n_field > 0:
                key = key.split(sep="/")
                key = key[len(key) - self.__n_field:]
                key = "/".join(key)

            if key not in stat_dict:
                stat_dict[key] = 0

            stat_dict[key] += stat.size

        # Save data into member.
        self.__data["memory_evaluation"] = {
            "head": np.array(["size (Mib)", "function"]),
            "label": np.array(list(stat_dict.keys())),
            "data": np.array([list(stat_dict.values())]).T / 1024
        }

        # "Pre-draw" the plot for time usage.
        self.__plot["memory_evaluation"] = self.__set_plot(
            head=np.array(["size (Mib)", "function"]),
            label=np.array(list(stat_dict.keys())),
            data=np.array([list(stat_dict.values())]).T / 1024
        )

    def __time_evaluation(
        self,
        profile: Profile
    ):
        """Parsed time evaluation output and set a plot.

        Parameters
        ----------
        profile : `Profile`
            The "assessor".
        """
        # Redirect stdout into a buffer.
        buffer: StringIO = StringIO()
        sys.stdout = buffer

        # Print the traceback of time execution into the buffer.
        Stats(profile).strip_dirs().print_stats()
        stat_time: str = buffer.getvalue()

        # Restore the standard stdout behaviour.
        sys.stdout = sys.__stdout__

        skip_line: bool = True
        data_label: np.array = np.array([])
        numeric_data: np.array = np.array([])

        # Setting the dataset for time usage.
        for line in stat_time.strip().split("\n"):
            if "ncalls" in line:
                data_head: np.array = np.array(line.split())
                # Add units (seconds) to every header except the first and
                # last one.
                data_head = np.char.add(data_head, ["", *[" (s)"] * 4, ""])

                # pylint: disable=C0103
                # It is not a constant pylint!
                skip_line = False
                # pylint: enable=C0103
                continue

            if skip_line or "/" in line:
                continue

            line: list = line.split(maxsplit=5)

            data_label = np.append(data_label, line[-1])

            if numeric_data.shape[0] == 0:
                numeric_data = np.array(line[:-1])
            else:
                numeric_data = np.vstack((numeric_data, line[:-1]))

        numeric_data = numeric_data.astype(float)

        # Save data into member.
        self.__data["time_evaluation"] = {
            "head": data_head,
            "label": data_label,
            "data": numeric_data
        }

        # "Pre-draw" the plot for time usage.
        self.__plot["time_evaluation"] = self.__set_plot(
            head=data_head,
            label=data_label,
            data=numeric_data
        )

    # pylint: disable=too-many-arguments, too-many-instance-attributes
    # Special class that needs a lot of data to be set up. Using a dataclass
    # would be unnecessary, since it would just take more memory for nothing.

    def __set_plot(
        self,
        head: np.array,
        label: np.array,
        data: np.array,
        foreground: str = "#2E2E3E",
        background: str = "rgba(0, 0, 0, 0)"
    ) -> go.Figure:
        """Set a Plotly bar plot based on computed evaluation.

        Parameters
        ----------
        head : `np.array`
            The data header (like ncall). Or like one label per column.

        label : `np.array`
            The data label (like functions names). Or like one label per row.

        data : `np.array`
            The numerical data.

        foreground : `str`, optional
            The "foreground" color. By default "#2E2E3E".

        background : `str`, optional
            The "background" color. By default "rgba(0, 0, 0, 0)".

        Returns
        -------
        go.Figure
            The setted Plotly bar plot.
        """
        plot: go.Figure = go.Figure()

        sort_i: np.array = np.flip(np.argsort(data.T[0]))

        # Trace the barplot
        plot.add_trace(go.Bar(
            x=label[sort_i],
            y=data.T[0][sort_i],
            marker_line_color=foreground,
            marker_color=foreground
        ))

        # Modify general plot properties.
        plot.update_layout(
            template="plotly_white",
            margin={"r": 5},
            font={"size": 12, "family": "Roboto Light"},
            xaxis={
                "title": f"<b>Tested function ({head[-1]})</b>",
                "showline": True,
                "linewidth": 1,
                "showgrid": False,
                "title_font": {"family": "Roboto Black"},
                "tickfont": {"size": 12}
            },
            yaxis={
                "title": f"<b>{head[0].capitalize()}</b>",
                "showline": True,
                "linewidth": 1,
                "showgrid": False,
                "title_font": {"family": "Roboto Black"},
                "tickfont": {"size": 12}
            },
            title_font={"family": "Roboto Black"},
            plot_bgcolor=background,
            paper_bgcolor=background,
        )

        # Add the rectangle border.
        plot.add_shape(
            type="rect",
            xref="paper",
            yref="paper",
            x0=0,
            y0=0,
            x1=1,
            y1=1,
            line={"width": 2, "color": foreground}
        )

        # Add a dropdown menu when there are multiple data columns.
        if data.T.shape[0] != 1:
            # Add the dropdown to the plot.
            plot.update_layout(updatemenus=self.__add_dropdown(
                foreground=foreground,
                head=head,
                label=label,
                data=data
            ))

        return plot

    # pylint: enable=too-many-arguments

    def __add_dropdown(
        self,
        head: np.array,
        label: np.array,
        data: np.array,
        foreground: str
    ) -> list:
        """Add a dropdown to the Plotly plot, in order to select different
        assessed values.

        Parameters
        ----------
        head : `np.array`
            The data header (like ncall). Or like one label per column.

        label : `np.array`
            The data label (like functions names). Or like one label per row.

        data : `np.array`
            The numerical data.

        foreground : `str`
            The "foreground" color.

        Returns
        -------
        `list`
            The dropdown menu, which is a `update_menu`.
        """
        button: list = []

        for i, label_i in enumerate(head[:-1]):
            sort_i: np.array = np.flip(np.argsort(data.T[i]))

            # Add an element to the dropdown; selecting it updates the
            # plot.
            button += [{
                "method": "update",
                "label": label_i,
                "args": [
                    # Restyling.
                    {"x": [label[sort_i]], "y": [data.T[i][sort_i]]},
                    # Updating.
                    {"yaxis": {
                        "showline": True,
                        "linewidth": 1,
                        "showgrid": False,
                        "title": {
                            "text": f"<b>{label_i.capitalize()}</b>",
                            "font": {"family": "Roboto Black"}
                        },
                        "tickfont": {"size": 12}
                    }}
                ],
            }]

        # Create the dropdown menu.
        update_menu: list = [{
            "buttons": button,
            "type": "dropdown",
            "direction": "down",
            "showactive": True,
            "x": 1,
            "xanchor": "right",
            "y": 1.01,
            "yanchor": "bottom",
            "bgcolor": "#FFF",
            "bordercolor": foreground,
            "borderwidth": 2,
            "font_color": foreground
        }]

        return update_menu

    def plot(
        self,
        path: str = "./"
    ):
        """Save the plot to a `.html` file.

        Parameters
        ----------
        path : `str`, optional
            The path to save the files to, which has to be a directory. By
            default "./".

        Raises
        ------
        `FileNotFoundError`
            If the input path does not exist.

        `ValueError`
            If the input path is not a directory.
        """
        if path.endswith("/"):
            path = path[:-1]

        if not exists(path):
            raise FileNotFoundError(f"[Err##] Given path \"{path}\" does not "
                                    "exist.")
        if not isdir(path):
            raise ValueError(f"[Err##] Given path  \"{path}\" is not "
                             "directory.")

        for key, plot_i in self.__plot.items():
            # Save the plot.
            plot_i.write_html(
                file=f"{path}/{key}.html",
                include_plotlyjs=True,
                full_html=True
            )

    def get_plot(self) -> dict:
        """Get the setted plot.

        Returns
        -------
        `dict`
            The setted plot.
        """
        return self.__plot

    def data(self) -> dict:
        """Get computed data.

        Returns
        -------
        `dict`
            Computed data about time or memory usage.

        Raises
        ------
        `ValueError`
            If no data has been computed yet.
        """
        if not self.__data:
            raise ValueError("[Err##] You have to compute properties before "
                             "getting them. To do so, use "
                             "`self.launch_profiling()`.")

        return self.__data
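
Putting it all together, a minimal end-to-end session could look like this (the `fibonacci` workload and its argument are illustrative):

```py
def fibonacci(n: int) -> int:
    """A dummy function to assess."""
    if n < 2:
        return n
    return fibonacci(n - 1) + fibonacci(n - 2)


assessor: PerformanceAssessor = PerformanceAssessor(
    main=fibonacci,
    n_field=1,
    n=20
)
assessor.launch_profiling(do_memory=True, do_time=True)

# Writes memory_evaluation.html and time_evaluation.html.
assessor.plot(path="./")
```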

__add_dropdown(head, label, data, foreground)

Add a dropdown to the Plotly plot in order to select between the
different assessed values.

Parameters

head : np.array
The data header (e.g. ncalls), one label per column.

label : np.array
The data label (e.g. function names), one label per row.

data : np.array
The numerical data.

foreground : str
The "foreground" color.

Returns

list
The dropdown menu, a Plotly updatemenus entry.
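
For orientation, the returned value is a plain Plotly updatemenus list; a stripped-down sketch of its shape, with illustrative values:

```py
update_menu = [{
    "type": "dropdown",
    "showactive": True,
    "buttons": [{
        "method": "update",
        "label": "ncalls",
        # New bar coordinates, then layout updates (y-axis title).
        "args": [
            {"x": [["g", "f"]], "y": [[3, 1]]},
            {"yaxis": {"title": {"text": "<b>Ncalls</b>"}}}
        ]
    }]
}]
```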


__init__(main, n_field=0, **kwargs)

Initialize a PerformanceAssessor object.

Parameters

main : Callable
The function to assess.

n_field : int, optional
The number of fields to keep in the function name. By default 0, which
keeps the full path.

Let us say that we have a function named like this:

/home/user/Document/some_function.py:000(function_name)

If we input 2, we will have something like this:

Document/some_function.py:000(function_name)

We split at each "/" and keep the last two fields.

kwargs
The keyword arguments to pass to the function under test.

Example
assessor: PerformanceAssessor = PerformanceAssessor(
    main=function_to_test,
    n_field=1,
    argument_for_function_to_test="some_value"
)
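
The truncation controlled by `n_field` is a plain split-and-join; a minimal sketch of the logic:

```py
key = "/home/user/Document/some_function.py:000(function_name)"
n_field = 2

fields = key.split(sep="/")
key = "/".join(fields[len(fields) - n_field:])

# Document/some_function.py:000(function_name)
print(key)
```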

__memory_evaluation(stat_memory)

Parse the memory evaluation output and set up a plot.

Parameters

stat_memory : list[tracemalloc.Statistic]
The memory statistics taken from a tracemalloc snapshot.


__set_plot(head, label, data, foreground='#2E2E3E', background='rgba(0, 0, 0, 0)')

Set up a Plotly bar plot based on the computed evaluation.

Parameters

head : np.array
The data header (e.g. ncalls), one label per column.

label : np.array
The data label (e.g. function names), one label per row.

data : np.array
The numerical data.

foreground : str, optional
The "foreground" color. By default "#2E2E3E".

background : str, optional
The "background" color. By default "rgba(0, 0, 0, 0)".

Returns

go.Figure
The configured Plotly bar plot.
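
The core of the method is a single bar trace sorted by the first data column; a minimal sketch with illustrative data:

```py
import numpy as np
import plotly.graph_objects as go

label = np.array(["f", "g", "h"])
data = np.array([[0.2], [1.5], [0.7]])

# Sort the bars by the first column, largest first.
sort_i = np.flip(np.argsort(data.T[0]))

figure = go.Figure(go.Bar(x=label[sort_i], y=data.T[0][sort_i]))
figure.write_html("barplot.html")
```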


__time_evaluation(profile)

Parse the time evaluation output and set up a plot.

Parameters

profile : Profile
The profiler holding the collected timing statistics.


data()

Get computed data.

Returns

dict
Computed data about time or memory usage.

Raises

ValueError
If no data has been computed yet.
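
A short usage sketch, reusing the assessor from the class example:

```py
assessor.launch_profiling()
data = assessor.data()

# Each entry holds "head", "label" and "data" arrays.
print(data["time_evaluation"]["head"])
```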


get_plot()

Get the plots that have been set up.

Returns

dict
The plots that have been set up, keyed by evaluation type.
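
A short usage sketch, reusing the assessor from the class example:

```py
# Keys are "memory_evaluation" and/or "time_evaluation".
for name, figure in assessor.get_plot().items():
    figure.show()
```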


launch_profiling(do_memory=True, do_time=True)

Launch the evaluation of performance (memory or time).

Parameters

do_memory : bool, optional
Do the memory evaluation. By default True.

do_time : bool, optional
Do the time evaluation. By default True.

Raises

ValueError
When both do_memory and do_time are set to False.
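
For instance, to profile time only (reusing function_to_test from the constructor example):

```py
assessor = PerformanceAssessor(main=function_to_test)
assessor.launch_profiling(do_memory=False, do_time=True)
```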


plot(path='./')

Save the plots to .html files.

Parameters

path : str, optional
The path to save the files to, which has to be a directory. By default
"./".

Raises

FileNotFoundError
If the input path does not exist.

ValueError
If the input path is not a directory.
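
A short usage sketch, assuming the output directory exists (the results name is illustrative):

```py
from pathlib import Path

Path("results").mkdir(exist_ok=True)

# Writes one .html file per computed evaluation, e.g.
# results/time_evaluation.html.
assessor.plot(path="results/")
```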
