How to use the pyflink.util.utils module in pyflink

To help you get started, we’ve selected a few pyflink.util.utils examples, based on popular ways the module is used in public projects.
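All of the examples below come from the Apache Flink code base itself, and they share one pattern: pyflink.util.utils.to_jarray converts a Python list into a typed Java array that can be passed to JVM-side Flink classes through the Py4J gateway. A minimal sketch of that call on its own (the field names here are made up):

from pyflink.java_gateway import get_gateway
from pyflink.util import utils

gateway = get_gateway()

# Build a Java String[] from a Python list: the first argument is the
# Java element type, the second is the Python iterable to convert.
j_field_names = utils.to_jarray(gateway.jvm.String, ["name", "age"])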


github apache / flink / flink-python / pyflink / testing / source_sink_utils.py
def __init__(self, j_table_sink, field_names, field_types):
        gateway = get_gateway()
        j_field_names = utils.to_jarray(gateway.jvm.String, field_names)
        j_field_types = utils.to_jarray(gateway.jvm.TypeInformation,
                                        [_to_java_type(field_type) for field_type in field_types])
        j_table_sink = j_table_sink.configure(j_field_names, j_field_types)
        super(TestTableSink, self).__init__(j_table_sink)
github apache / flink / flink-python / pyflink / table / sinks.py
def __init__(self, field_names, field_types, path, field_delimiter=',', num_files=-1,
                 write_mode=None):
        # type: (list[str], list[DataType], str, str, int, int) -> None
        gateway = get_gateway()
        if write_mode == WriteMode.NO_OVERWRITE:
            j_write_mode = gateway.jvm.org.apache.flink.core.fs.FileSystem.WriteMode.NO_OVERWRITE
        elif write_mode == WriteMode.OVERWRITE:
            j_write_mode = gateway.jvm.org.apache.flink.core.fs.FileSystem.WriteMode.OVERWRITE
        elif write_mode is None:
            j_write_mode = None
        else:
            raise Exception('Unsupported write_mode: %s' % write_mode)
        j_csv_table_sink = gateway.jvm.CsvTableSink(
            path, field_delimiter, num_files, j_write_mode)
        j_field_names = utils.to_jarray(gateway.jvm.String, field_names)
        j_field_types = utils.to_jarray(gateway.jvm.TypeInformation,
                                        [_to_java_type(field_type) for field_type in field_types])
        j_csv_table_sink = j_csv_table_sink.configure(j_field_names, j_field_types)
        super(CsvTableSink, self).__init__(j_csv_table_sink)
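A hedged sketch of how this sink might be wired into a job (the schema, output path, and the pre-existing table_env variable are assumptions for illustration):

from pyflink.table import DataTypes
from pyflink.table.sinks import CsvTableSink

# Hypothetical schema and output path; table_env is assumed to be an
# existing TableEnvironment.
sink = CsvTableSink(["word", "cnt"],
                    [DataTypes.STRING(), DataTypes.BIGINT()],
                    "/tmp/results.csv")
table_env.register_table_sink("results", sink)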
github apache / flink / flink-python / pyflink / table / udf.py
def _create_judf(self, is_blink_planner, table_config):
        func = self._func
        if not isinstance(self._func, UserDefinedFunction):
            func = DelegatingScalarFunction(self._func)

        import cloudpickle
        serialized_func = cloudpickle.dumps(func)

        gateway = get_gateway()
        j_input_types = utils.to_jarray(gateway.jvm.TypeInformation,
                                        [_to_java_type(i) for i in self._input_types])
        j_result_type = _to_java_type(self._result_type)
        if is_blink_planner:
            PythonTableUtils = gateway.jvm\
                .org.apache.flink.table.planner.utils.python.PythonTableUtils
            j_scalar_function = PythonTableUtils \
                .createPythonScalarFunction(table_config,
                                            self._name,
                                            bytearray(serialized_func),
                                            j_input_types,
                                            j_result_type,
                                            self._deterministic,
                                            _get_python_env())
        else:
            PythonTableUtils = gateway.jvm.PythonTableUtils
            # Legacy (non-blink) planner: reconstructed to mirror the
            # blink-planner call above, minus the table_config argument.
            j_scalar_function = PythonTableUtils \
                .createPythonScalarFunction(self._name,
                                            bytearray(serialized_func),
                                            j_input_types,
                                            j_result_type,
                                            self._deterministic,
                                            _get_python_env())
        return j_scalar_function
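_create_judf is internal plumbing: it cloudpickles the Python function, converts the declared types with to_jarray, and asks the JVM-side PythonTableUtils to wrap the result. User code normally goes through the public udf() factory instead; a minimal sketch against the 1.10-era API (table_env is assumed to be an existing TableEnvironment):

from pyflink.table import DataTypes
from pyflink.table.udf import udf

# A scalar Python UDF; pyflink pickles the lambda and registers it on
# the JVM through machinery like _create_judf above.
add = udf(lambda i, j: i + j,
          [DataTypes.BIGINT(), DataTypes.BIGINT()],
          DataTypes.BIGINT())
table_env.register_function("add", add)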
github apache / flink / flink-python / pyflink / table / sources.py
def __init__(self, source_path, field_names, field_types):
        # type: (str, list[str], list[DataType]) -> None
        gateway = get_gateway()
        j_field_names = utils.to_jarray(gateway.jvm.String, field_names)
        j_field_types = utils.to_jarray(gateway.jvm.TypeInformation,
                                        [_to_java_type(field_type)
                                         for field_type in field_types])
        super(CsvTableSource, self).__init__(
            gateway.jvm.CsvTableSource(source_path, j_field_names, j_field_types))
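A hedged usage sketch of this source (the CSV path and schema are made up; table_env is assumed to be an existing TableEnvironment):

from pyflink.table import DataTypes
from pyflink.table.sources import CsvTableSource

source = CsvTableSource("/tmp/input.csv",
                        ["word", "cnt"],
                        [DataTypes.STRING(), DataTypes.BIGINT()])
table_env.register_table_source("words", source)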
github apache / flink / flink-python / pyflink / table / table_environment.py
def scan(self, *table_path):
        """
        Scans a registered table and returns the resulting :class:`Table`.

        Example: scanning a table from a registered catalog
        ::

            >>> tab = table_env.scan("catalogName", "dbName", "tableName")

        :param table_path: The path of the table to scan.
        :type table_path: str
        :throws: Exception if no table is found using the given table path.
        :return: The resulting table.
        :rtype: pyflink.table.Table

        .. note:: Deprecated in 1.10. Use :func:`from_path` instead.
        """
        warnings.warn("Deprecated in 1.10. Use from_path instead.", DeprecationWarning)
        gateway = get_gateway()
        j_table_paths = utils.to_jarray(gateway.jvm.String, table_path)
        j_table = self._j_tenv.scan(j_table_paths)
        return Table(j_table)
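Since scan is deprecated in 1.10, the same lookup is written with from_path, which takes a single dotted path string instead of varargs:

# Deprecated varargs form:
tab = table_env.scan("catalogName", "dbName", "tableName")

# Replacement since 1.10: the components join into one dotted path.
tab = table_env.from_path("catalogName.dbName.tableName")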