pfd.exploration.inference package

Submodules

pfd.exploration.inference.eval_dp module

class pfd.exploration.inference.eval_dp.DPTest(model: Path | str | None = None, data: Path | str | None = None, **kwargs)[source]

Bases: EvalModel

classmethod args()[source]
classmethod doc()[source]
evaluate(name: str = 'default', prefix: Path | str = './', **kwargs)[source]
inference(name: str = 'default', prefix: Path | str = './', **kwargs)[source]
load_model(model: Path | str, **kwargs)[source]
read_data(data, fmt='deepmd/npy', **kwargs)[source]
read_data_unlabeled(data, fmt='deepmd/npy', **kwargs)[source]

pfd.exploration.inference.eval_model module

class pfd.exploration.inference.eval_model.EvalModel(model: Path | str | None = None, data: Path | str | None = None, **kwargs)[source]

Bases: ABC

The base class for inference and evaluation.

Parameters:

model (Path | str | None) – path to the model to evaluate; presumably passed to load_model() by subclasses — confirm against each implementation

data (Path | str | None) – path to the evaluation data; the expected format depends on the subclass (e.g. DPTest defaults to 'deepmd/npy')

clear_data()[source]
clear_model()[source]
property data
abstractmethod evaluate(**kwargs)[source]
static get_driver(key: str)[source]

Get a driver for ModelEval

Parameters:

key (str) – key under which the driver was registered via register()

Raises:

RuntimeError – presumably raised when no driver is registered under key — confirm in the implementation

Returns:

the driver registered for ModelEval under key

Return type:

type

static get_drivers() dict[source]

Get all drivers

Returns:

all drivers

Return type:

dict

abstractmethod inference(**kwargs)[source]
abstractmethod load_model(model: Path | str, **kwargs)[source]
property model
abstractmethod read_data(data: Path | str, **kwargs)[source]
abstractmethod read_data_unlabeled(data: Path | str, **kwargs)[source]
static register(key: str)[source]

Register a model interface. Intended to be used as a decorator.

Parameters:

key (str) – key of the model

class pfd.exploration.inference.eval_model.TestReport(name: str = 'default_system', system: dpdata.system.System | None = None, atom_numb: int = 0, numb_frame: int = 0, mae_f: float = 0, rmse_f: float = 0, mae_e: float = 0, rmse_e: float = 0, mae_e_atom: float = 0, rmse_e_atom: float = 0, mae_v: float = 0, rmse_v: float = 0, lab_e: numpy.ndarray | None = None, pred_e: numpy.ndarray | None = None, lab_f: numpy.ndarray | None = None, pred_f: numpy.ndarray | None = None, lab_v: numpy.ndarray | None = None, pred_v: numpy.ndarray | None = None)[source]

Bases: object

atom_numb: int = 0
lab_e: ndarray | None = None
lab_f: ndarray | None = None
lab_v: ndarray | None = None
mae_e: float = 0
mae_e_atom: float = 0
mae_f: float = 0
mae_v: float = 0
name: str = 'default_system'
numb_frame: int = 0
pred_e: ndarray | None = None
pred_f: ndarray | None = None
pred_v: ndarray | None = None
report()[source]
rmse_e: float = 0
rmse_e_atom: float = 0
rmse_f: float = 0
rmse_v: float = 0
system: System | None = None
class pfd.exploration.inference.eval_model.TestReports(name: str = 'default_reports')[source]

Bases: object

add_report(report: TestReport)[source]
get_and_output_systems(prefix: Path | str = '.')[source]
get_nframes()[source]
get_systems()[source]
get_weighted_rmse_e_atom()[source]
get_weighted_rmse_f()[source]
sub_reports(index)[source]

pfd.exploration.inference.util module

pfd.exploration.inference.util.get_mae(test_arr, orig_arr) float[source]
pfd.exploration.inference.util.get_rmse(test_arr, orig_arr) float[source]

Module contents