U
    -e%                    @   sX  d dl mZ d dlmZ d dlmZ d dlmZmZ d dl	Z	d dl
Z
d dlmZ d dlZd dlZd dlZd dlZd dlZd dlZd dlZd dlmZ zd dlmZ W n8 ek
r Z zedee ddW 5 dZ[X Y nX d d	lmZmZm Z m!Z!m"Z"m#Z#m$Z$m%Z%m&Z&m'Z' d d
l(m)Z)m*Z*m+Z+m,Z,m-Z- d dlm.Z/ d dl0m1Z1m2Z2m3Z3m4Z4 dZ5dd Z6dd Z7dd Z8dnddZ9dZ:dd Z;e4dde;de<Z=G dd dZ>e?dZ@dd  ZAd!d" ZBd#d$ ZCd%ZDd&ZEG d'd( d(ZFd)d* ZGG d+d, d,ZHG d-d. d.ZIG d/d0 d0ZJG d1d2 d2ZKd3d4 ZLd5d6 ZMd7d8 ZNd9hZOG d:d; d;ZPdod<d=ZQd>ZRd?ZSd@ZTG dAdB dBZUdpdEdFZVdGdH ZWG dIdJ dJZXdKZYdLZZdddddMddMd dNdddMdddddddOdPdQZ[eY\dRdS]dTeSfdUe:eZe[_^dqdVdWZ_eY\dXdS]eSdYfdZe:d[e__^drd_d`Z`daZadb\eDeae`_^dcdd ZbdsdedfZcdtdgdhZddudidjZedvdkdlZfdmZgdS )w    )defaultdict)futures)nullcontext)partialreduceN)
CollectionzPThe pyarrow installation is not built with support for the Parquet file format ())
ParquetReader
StatisticsFileMetaDataRowGroupMetaDataColumnChunkMetaDataParquetSchemaColumnSchemaParquetLogicalTypeFileEncryptionPropertiesFileDecryptionProperties)LocalFileSystem
FileSystemFileType_resolve_filesystem_and_path_ensure_filesystem
filesystem)guid_is_path_like_stringify_path_deprecate_api)Zhdfsc                 C   s,   t | } tj| }|jtkr$|jS | S d S N)r   urllibparseurlparsescheme_URI_STRIP_SCHEMESpath)r$   Z
parsed_uri r%   U/var/www/html/Darija-Ai-Train/env/lib/python3.8/site-packages/pyarrow/parquet/core.py
_parse_uri<   s
    
r'   c                 C   s2   | d krt || S t | } t|}| |fS d S r   )legacyfsresolve_filesystem_and_pathr   r'   )Zpassed_filesystemr$   Zparsed_pathr%   r%   r&   _get_filesystem_and_pathG   s
    
r*   c                 C   sR   t | tr<| D ]*}t |tr&td}nd}||kr dS qnt | trNd| kS dS )Nr   T F)
isinstancebyteschrstr)valbyteZ
compare_tor%   r%   r&   _check_contains_nullP   s    




r2   Tc                 C   s   | dk	rt | dks&tdd | D r.tdt| d d trF| g} |r| D ]@}|D ]6\}}}t|tr|tdd |D st|rVtdqVqN| S )z+
    Check if filters are well-formed.
    Nr   c                 s   s   | ]}t |d kV  qdS )r   N)len.0fr%   r%   r&   	<genexpr>c   s     z!_check_filters.<locals>.<genexpr>zMalformed filtersc                 s   s   | ]}t |V  qd S r   )r2   )r5   vr%   r%   r&   r7   o   s     zBNull-terminated binary strings are not supported as filter values.)	r3   any
ValueErrorr,   r/   listallr2   NotImplementedError)filterscheck_null_stringsconjunctioncolopr0   r%   r%   r&   _check_filters^   s$    rC   a  Predicates are expressed using an ``Expression`` or using
    the disjunctive normal form (DNF), like ``[[('x', '=', 0), ...], ...]``.
    DNF allows arbitrary boolean logical combinations of single column predicates.
    The innermost tuples each describe a single column predicate. The list of inner
    predicates is interpreted as a conjunction (AND), forming a more selective,
    multiple-column predicate. Finally, the outermost list combines these filters
    as a disjunction (OR).

    Predicates may also be passed as List[Tuple]. This form is interpreted
    as a single conjunction. To express OR in predicates, one must
    use the (preferred) List[List[Tuple]] notation.

    Each tuple has format: (``key``, ``op``, ``value``) and compares the
    ``key`` with the ``value``.
    The supported ``op`` are:  ``=`` or ``==``, ``!=``, ``<``, ``>``, ``<=``,
    ``>=``, ``in`` and ``not in``. If the ``op`` is ``in`` or ``not in``, the
    ``value`` must be a collection such as a ``list``, a ``set`` or a
    ``tuple``.

    Examples:

    Using the ``Expression`` API:

    .. code-block:: python

        import pyarrow.compute as pc
        pc.field('x') == 0
        pc.field('y').isin(['a', 'b', 'c'])
        ~pc.field('y').isin({'a', 'b'})

    Using the DNF format:

    .. code-block:: python

        ('x', '=', 0)
        ('y', 'in', ['a', 'b', 'c'])
        ('z', 'not in', {'a','b'})

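    As an illustrative sketch (reusing the hypothetical column names above),
    such a DNF filter list can be converted into a single ``Expression`` with
    the ``filters_to_expression()`` helper defined below:

    .. code-block:: python

        expr = filters_to_expression([[('x', '=', 0), ('y', 'in', ['a', 'b', 'c'])],
                                      [('z', 'not in', {'a', 'b'})]])
        # inner tuples are combined with AND, the outer lists with OR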
    c                    sr   ddl m t| jr| S t| dd} fdd g }| D ](} fdd|D }|ttj| q<ttj	|S )	a  
    Check if filters are well-formed and convert to an ``Expression``.

    Parameters
    ----------
    filters : List[Tuple] or List[List[Tuple]]

    Notes
    -----
    See internal ``pyarrow._DNF_filter_doc`` attribute for more details.

    Examples
    --------

    >>> filters_to_expression([('foo', '==', 'bar')])
    <pyarrow.compute.Expression (foo == "bar")>

    Returns
    -------
    pyarrow.compute.Expression
        An Expression representing the filters
    r   NF)r?   c                    s     | }|dks|dkr"||kS |dkr2||kS |dkrB||k S |dkrR||kS |dkrb||kS |dkrr||kS |dkr||S |d	kr|| S td
| ||fd S )N===!=<><=>=innot inz,"{0}" is not a valid operator in predicates.)fieldisinr:   format)rA   rB   r0   rM   )dsr%   r&   convert_single_predicate   s,    

z7filters_to_expression.<locals>.convert_single_predicatec                    s   g | ]\}}} |||qS r%   r%   )r5   rA   rB   r0   )rQ   r%   r&   
<listcomp>   s   z)filters_to_expression.<locals>.<listcomp>)
pyarrow.datasetdatasetr,   
ExpressionrC   appendr   operatorand_or_)r>   Zdisjunction_membersr@   Zconjunction_membersr%   )rQ   rP   r&   filters_to_expression   s    
rZ   _filters_to_expressionz10.0.0c                   @   s   e Zd ZdZddddddddddddddZdd	 Zd
d Zdd Zedd Z	edd Z
edd Zedd Zd*edddZeedddZd+ddZd,dd Zd-d"d#Zd.d$d%Zd/d&d'Zd0d(d)ZdS )1ParquetFilea
  
    Reader interface for a single Parquet file.

    Parameters
    ----------
    source : str, pathlib.Path, pyarrow.NativeFile, or file-like object
        Readable source. For passing bytes or buffer-like file containing a
        Parquet file, use pyarrow.BufferReader.
    metadata : FileMetaData, default None
        Use existing metadata object, rather than reading from file.
    common_metadata : FileMetaData, default None
        Will be used in reads for pandas schema metadata if not found in the
        main file's metadata; it has no other uses at the moment.
    read_dictionary : list
        List of column names to read directly as DictionaryArray.
    memory_map : bool, default False
        If the source is a file path, use a memory map to read file, which can
        improve performance in some environments.
    buffer_size : int, default 0
        If positive, perform read buffering when deserializing individual
        column chunks. Otherwise IO calls are unbuffered.
    pre_buffer : bool, default False
        Coalesce and issue file reads in parallel to improve performance on
        high-latency filesystems (e.g. S3). If True, Arrow will use a
        background I/O thread pool.
    coerce_int96_timestamp_unit : str, default None
        Cast timestamps that are stored in INT96 format to a particular
        resolution (e.g. 'ms'). Setting to None is equivalent to 'ns'
        and therefore INT96 timestamps will be inferred as timestamps
        in nanoseconds.
    decryption_properties : FileDecryptionProperties, default None
        File decryption properties for Parquet Modular Encryption.
    thrift_string_size_limit : int, default None
        If not None, override the maximum total string size allocated
        when decoding Thrift structures. The default limit should be
        sufficient for most Parquet files.
    thrift_container_size_limit : int, default None
        If not None, override the maximum total size of containers allocated
        when decoding Thrift structures. The default limit should be
        sufficient for most Parquet files.
    filesystem : FileSystem, default None
        If nothing passed, will be inferred based on path.
        The path will first be looked for in the local on-disk filesystem;
        otherwise it will be parsed as a URI to determine the filesystem.

    Examples
    --------

    Generate an example PyArrow Table and write it to Parquet file:

    >>> import pyarrow as pa
    >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
    ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
    ...                              "Brittle stars", "Centipede"]})

    >>> import pyarrow.parquet as pq
    >>> pq.write_table(table, 'example.parquet')

    Create a ``ParquetFile`` object from the Parquet file:

    >>> parquet_file = pq.ParquetFile('example.parquet')

    Read the data:

    >>> parquet_file.read()
    pyarrow.Table
    n_legs: int64
    animal: string
    ----
    n_legs: [[2,2,4,4,5,100]]
    animal: [["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"]]

    Create a ParquetFile object with "animal" column as DictionaryArray:

    >>> parquet_file = pq.ParquetFile('example.parquet',
    ...                               read_dictionary=["animal"])
    >>> parquet_file.read()
    pyarrow.Table
    n_legs: int64
    animal: dictionary<values=string, indices=int32, ordered=0>
    ----
    n_legs: [[2,2,4,4,5,100]]
    animal: [  -- dictionary:
    ["Flamingo","Parrot",...,"Brittle stars","Centipede"]  -- indices:
    [0,1,2,3,4,5]]
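    ``ParquetFile`` can also be used as a context manager, which closes the
    underlying reader on exit. An illustrative sketch reusing the
    'example.parquet' file written above:

    >>> import pyarrow.parquet as pq
    >>> with pq.ParquetFile('example.parquet') as f:
    ...     batches = list(f.iter_batches(batch_size=3))
    >>> sum(b.num_rows for b in batches)
    6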
    NFr   )metadatacommon_metadataread_dictionary
memory_mapbuffer_size
pre_buffercoerce_int96_timestamp_unitdecryption_propertiesthrift_string_size_limitthrift_container_size_limitr   c                C   sr   t |dd| _t|||\}}|d k	r6||}d| _t | _| jj||||||||	|
|d
 || _|  | _	d S )NclosedT)	Zuse_memory_mapra   rb   r_   r]   rc   rd   re   rf   )
getattr_close_sourcer   open_input_filer	   readeropenr^   _build_nested_paths_nested_paths_by_prefix)selfsourcer]   r^   r_   r`   ra   rb   rc   rd   re   rf   r   r%   r%   r&   __init__F  s0      
   	zParquetFile.__init__c                 C   s   | S r   r%   ro   r%   r%   r&   	__enter__a  s    zParquetFile.__enter__c                 O   s   |    d S r   closero   argskwargsr%   r%   r&   __exit__d  s    zParquetFile.__exit__c                 C   sn   | j j}tt}t|D ]P\}}|d }|dd  }|| | |sHqd||d f}|dd  }q4q|S )Nr      .)rk   Zcolumn_pathsr   r;   	enumeraterV   join)ro   pathsresultir$   keyrestr%   r%   r&   rm   g  s    zParquetFile._build_nested_pathsc                 C   s   | j jS )z.
        Return the Parquet metadata.
        )rk   r]   rr   r%   r%   r&   r]   z  s    zParquetFile.metadatac                 C   s   | j jS )zG
        Return the Parquet schema, unconverted to Arrow types
        )r]   schemarr   r%   r%   r&   r     s    zParquetFile.schemac                 C   s   | j jS )a  
        Return the inferred Arrow schema, converted from the whole Parquet
        file's schema

        Examples
        --------
        Generate an example Parquet file:

        >>> import pyarrow as pa
        >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_table(table, 'example.parquet')
        >>> parquet_file = pq.ParquetFile('example.parquet')

        Read the Arrow schema:

        >>> parquet_file.schema_arrow
        n_legs: int64
        animal: string
        )rk   schema_arrowrr   r%   r%   r&   r     s    zParquetFile.schema_arrowc                 C   s   | j jS )a.  
        Return the number of row groups of the Parquet file.

        Examples
        --------
        >>> import pyarrow as pa
        >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_table(table, 'example.parquet')
        >>> parquet_file = pq.ParquetFile('example.parquet')

        >>> parquet_file.num_row_groups
        1
        )rk   num_row_groupsrr   r%   r%   r&   r     s    zParquetFile.num_row_groupsforcec                 C   s   | j s
|r| j  d S r   )ri   rk   ru   )ro   r   r%   r%   r&   ru     s    
zParquetFile.close)returnc                 C   s   | j jS r   )rk   rg   rr   r%   r%   r&   rg     s    zParquetFile.closedTc                 C   s    | j ||d}| jj|||dS )a  
        Read a single row group from a Parquet file.

        Parameters
        ----------
        i : int
            Index of the individual row group that we want to read.
        columns : list
            If not None, only these columns will be read from the row group. A
            column name may be a prefix of a nested field, e.g. 'a' will select
            'a.b', 'a.c', and 'a.d.e'.
        use_threads : bool, default True
            Perform multi-threaded column reads.
        use_pandas_metadata : bool, default False
            If True and file has custom pandas schema metadata, ensure that
            index columns are also loaded.

        Returns
        -------
        pyarrow.table.Table
            Content of the row group as a table (of columns)

        Examples
        --------
        >>> import pyarrow as pa
        >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_table(table, 'example.parquet')
        >>> parquet_file = pq.ParquetFile('example.parquet')

        >>> parquet_file.read_row_group(0)
        pyarrow.Table
        n_legs: int64
        animal: string
        ----
        n_legs: [[2,2,4,4,5,100]]
        animal: [["Flamingo","Parrot",...,"Brittle stars","Centipede"]]
        use_pandas_metadatacolumn_indicesuse_threads)_get_column_indicesrk   read_row_group)ro   r   columnsr   r   r   r%   r%   r&   r     s    * 
zParquetFile.read_row_groupc                 C   s    | j ||d}| jj|||dS )a  
        Read multiple row groups from a Parquet file.

        Parameters
        ----------
        row_groups : list
            Only these row groups will be read from the file.
        columns : list
            If not None, only these columns will be read from the row group. A
            column name may be a prefix of a nested field, e.g. 'a' will select
            'a.b', 'a.c', and 'a.d.e'.
        use_threads : bool, default True
            Perform multi-threaded column reads.
        use_pandas_metadata : bool, default False
            If True and file has custom pandas schema metadata, ensure that
            index columns are also loaded.

        Returns
        -------
        pyarrow.table.Table
            Content of the row groups as a table (of columns).

        Examples
        --------
        >>> import pyarrow as pa
        >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_table(table, 'example.parquet')
        >>> parquet_file = pq.ParquetFile('example.parquet')

        >>> parquet_file.read_row_groups([0,0])
        pyarrow.Table
        n_legs: int64
        animal: string
        ----
        n_legs: [[2,2,4,4,5,...,2,4,4,5,100]]
        animal: [["Flamingo","Parrot","Dog",...,"Brittle stars","Centipede"]]
        r   r   )r   rk   read_row_groups)ro   
row_groupsr   r   r   r   r%   r%   r&   r     s    * zParquetFile.read_row_groups   c                 C   s<   |dkrt d| jj}| j||d}| jj||||d}|S )a  
        Read streaming batches from a Parquet file.

        Parameters
        ----------
        batch_size : int, default 64K
            Maximum number of records to yield per batch. Batches may be
            smaller if there aren't enough rows in the file.
        row_groups : list
            Only these row groups will be read from the file.
        columns : list
            If not None, only these columns will be read from the file. A
            column name may be a prefix of a nested field, e.g. 'a' will select
            'a.b', 'a.c', and 'a.d.e'.
        use_threads : boolean, default True
            Perform multi-threaded column reads.
        use_pandas_metadata : boolean, default False
            If True and file has custom pandas schema metadata, ensure that
            index columns are also loaded.

        Yields
        ------
        pyarrow.RecordBatch
            Contents of each batch as a record batch

        Examples
        --------
        Generate an example Parquet file:

        >>> import pyarrow as pa
        >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_table(table, 'example.parquet')
        >>> parquet_file = pq.ParquetFile('example.parquet')
        >>> for i in parquet_file.iter_batches():
        ...     print("RecordBatch")
        ...     print(i.to_pandas())
        ...
        RecordBatch
           n_legs         animal
        0       2       Flamingo
        1       2         Parrot
        2       4            Dog
        3       4          Horse
        4       5  Brittle stars
        5     100      Centipede
        Nr   r   )r   r   r   )ranger]   r   r   rk   iter_batches)ro   
batch_sizer   r   r   r   r   Zbatchesr%   r%   r&   r     s    3 zParquetFile.iter_batchesc                 C   s   | j ||d}| jj||dS )aW  
        Read a Table from Parquet format.

        Parameters
        ----------
        columns : list
            If not None, only these columns will be read from the file. A
            column name may be a prefix of a nested field, e.g. 'a' will select
            'a.b', 'a.c', and 'a.d.e'.
        use_threads : bool, default True
            Perform multi-threaded column reads.
        use_pandas_metadata : bool, default False
            If True and file has custom pandas schema metadata, ensure that
            index columns are also loaded.

        Returns
        -------
        pyarrow.table.Table
            Content of the file as a table (of columns).

        Examples
        --------
        Generate an example Parquet file:

        >>> import pyarrow as pa
        >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_table(table, 'example.parquet')
        >>> parquet_file = pq.ParquetFile('example.parquet')

        Read a Table:

        >>> parquet_file.read(columns=["animal"])
        pyarrow.Table
        animal: string
        ----
        animal: [["Flamingo","Parrot",...,"Brittle stars","Centipede"]]
        r   r   )r   rk   Zread_all)ro   r   r   r   r   r%   r%   r&   read[  s    ) zParquetFile.readc                 C   s   |  |}| jj||dS )a  
        Read contents of file for the given columns and batch size.

        Notes
        -----
        This function's primary purpose is benchmarking.
        The scan is executed on a single thread.

        Parameters
        ----------
        columns : list of integers, default None
            Select columns to read, if None scan all columns.
        batch_size : int, default 64K
            Number of rows to read at a time internally.

        Returns
        -------
        num_rows : int
            Number of rows in file

        Examples
        --------
        >>> import pyarrow as pa
        >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_table(table, 'example.parquet')
        >>> parquet_file = pq.ParquetFile('example.parquet')

        >>> parquet_file.scan_contents()
        6
        )r   )r   rk   scan_contents)ro   r   r   r   r%   r%   r&   r     s    "
zParquetFile.scan_contentsc                    s   |d krd S g }|D ]}| j kr| j |  q|r jj} jd k	rR jjnd }|rld|krlt|}n|rd|krt|}ng }|d k	r|r| fdd|D 7 }|S )N   pandasc                    s"   g | ]}t |ts j|qS r%   )r,   dictrk   Zcolumn_name_idx)r5   descrrr   r%   r&   rR     s   
z3ParquetFile._get_column_indices.<locals>.<listcomp>)rn   extendr]   r^   _get_pandas_index_columns)ro   Zcolumn_namesr   indicesnameZfile_keyvaluesZcommon_keyvaluesindex_columnsr%   rr   r&   r     s,    



zParquetFile._get_column_indices)F)NTF)NTF)r   NNTF)NTF)Nr   )F)__name__
__module____qualname____doc__rq   rs   ry   rm   propertyr]   r   r   r   boolru   rg   r   r   r   r   r   r   r%   r%   r%   r&   r\      sN   W     



  
/  
0    
>
.
&r\   z[ ,;{}()
	=]c                 C   s   t d| S )N_)_SPARK_DISALLOWED_CHARSsub)r   r%   r%   r&   _sanitized_spark_field_name  s    r   c           	      C   s   d|krxg }d}| D ]J}|j }t|}||krTd}t||j|j|j}|| q|| qtj|| jd}||fS | dfS d S )NsparkFT)r]   )	r   r   parM   typeZnullabler]   rV   r   )	r   flavorZsanitized_fieldsschema_changedrM   r   Zsanitized_nameZsanitized_field
new_schemar%   r%   r&   _sanitize_schema  s"    
 r   c                    s8   d|kr0 fddt  jD }tjj||dS  S d S )Nr   c                    s   g | ]} | qS r%   r%   )r5   r   tabler%   r&   rR     s     z#_sanitize_table.<locals>.<listcomp>)r   )r   Znum_columnsr   Tablefrom_arrays)r   r   r   Zcolumn_datar%   r   r&   _sanitize_table  s    r   a  version : {"1.0", "2.4", "2.6"}, default "2.6"
    Determine which Parquet logical types are available for use, whether the
    reduced set from the Parquet 1.x.x format or the expanded logical types
    added in later format versions.
    Files written with version='2.4' or '2.6' may not be readable in all
    Parquet implementations, so version='1.0' is likely the choice that
    maximizes file compatibility.
    UINT32 and some logical types are only available with version '2.4'.
    Nanosecond timestamps are only available with version '2.6'.
    Other features such as compression algorithms or the new serialized
    data page format must be enabled separately (see 'compression' and
    'data_page_version').
use_dictionary : bool or list
    Specify if we should use dictionary encoding in general or only for
    some columns.
compression : str or dict
    Specify the compression codec, either on a general basis or per-column.
    Valid values: {'NONE', 'SNAPPY', 'GZIP', 'BROTLI', 'LZ4', 'ZSTD'}.
write_statistics : bool or list
    Specify if we should write statistics in general (default is True) or only
    for some columns.
use_deprecated_int96_timestamps : bool, default None
    Write timestamps to INT96 Parquet format. Defaults to False unless enabled
    by the flavor argument. This takes priority over the coerce_timestamps option.
coerce_timestamps : str, default None
    Cast timestamps to a particular resolution. If omitted, defaults are chosen
    depending on `version`. By default, for ``version='1.0'``
    and ``version='2.4'``, nanoseconds are cast to microseconds ('us'), while
    for other `version` values, they are written natively without loss
    of resolution.  Seconds are always cast to milliseconds ('ms') by default,
    as Parquet does not have any temporal type with seconds resolution.
    If the casting results in loss of data, it will raise an exception
    unless ``allow_truncated_timestamps=True`` is given.
    Valid values: {None, 'ms', 'us'}
allow_truncated_timestamps : bool, default False
    Allow loss of data when coercing timestamps to a particular
    resolution. E.g. if microsecond or nanosecond data is lost when coercing to
    'ms', do not raise an exception. Passing ``allow_truncated_timestamps=True``
    will NOT result in the truncation exception being ignored unless
    ``coerce_timestamps`` is not None.
data_page_size : int, default None
    Set a target threshold for the approximate encoded size of data
    pages within a column chunk (in bytes). If None, use the default data page
    size of 1MByte.
flavor : {'spark'}, default None
    Sanitize schema or set other compatibility options to work with
    various target systems.
filesystem : FileSystem, default None
    If nothing passed, will be inferred from `where` if path-like, else
    `where` is already a file-like object so no filesystem is needed.
compression_level : int or dict, default None
    Specify the compression level for a codec, either on a general basis or
    per-column. If None is passed, arrow selects the compression level for
    the compression codec in use. The compression level has a different
    meaning for each codec, so you have to read the documentation of the
    codec you are using.
    An exception is thrown if the compression codec does not allow specifying
    a compression level.
use_byte_stream_split : bool or list, default False
    Specify if the byte_stream_split encoding should be used in general or
    only for some columns. If both dictionary and byte_stream_split are
    enabled, then dictionary is preferred.
    The byte_stream_split encoding is valid only for floating-point data types
    and should be combined with a compression codec.
column_encoding : string or dict, default None
    Specify the encoding scheme on a per column basis.
    Currently supported values: {'PLAIN', 'BYTE_STREAM_SPLIT'}.
    Certain encodings are only compatible with certain data types.
    Please refer to the encodings section of `Reading and writing Parquet
    files <https://arrow.apache.org/docs/cpp/parquet.html#encodings>`_.
data_page_version : {"1.0", "2.0"}, default "1.0"
    The serialized Parquet data page format version to write, defaults to
    1.0. This does not impact the file schema logical types and Arrow to
    Parquet type casting behavior; for that use the "version" option.
use_compliant_nested_type : bool, default True
    Whether to write compliant Parquet nested type (lists) as defined
    `here <https://github.com/apache/parquet-format/blob/master/
    LogicalTypes.md#nested-types>`_.
    For ``use_compliant_nested_type=True``, this will write into a list
    with 3-level structure where the middle level, named ``list``,
    is a repeated group with a single field named ``element``::

        <list-repetition> group <name> (LIST) {
            repeated group list {
                  <element-repetition> <element-type> element;
            }
        }

    For ``use_compliant_nested_type=False``, this will also write into a list
    with 3-level structure, where the name of the single field of the middle
    level ``list`` is taken from the element name for nested columns in Arrow,
    which defaults to ``item``::

        <list-repetition> group <name> (LIST) {
            repeated group list {
                <element-repetition> <element-type> item;
            }
        }
encryption_properties : FileEncryptionProperties, default None
    File encryption properties for Parquet Modular Encryption.
    If None, no encryption will be done.
    The encryption properties can be created using:
    ``CryptoFactory.file_encryption_properties()``.
write_batch_size : int, default None
    Number of values to write to a page at a time. If None, use the default of
    1024. ``write_batch_size`` is complementary to ``data_page_size``. If pages
    are exceeding the ``data_page_size`` due to large column values, lowering
    the batch size can help keep page sizes closer to the intended size.
dictionary_pagesize_limit : int, default None
    Specify the dictionary page size limit per row group. If None, use the
    default 1MB.
store_schema : bool, default True
    By default, the Arrow schema is serialized and stored in the Parquet
    file metadata (in the "ARROW:schema" key). When reading the file,
    if this key is available, it will be used to more faithfully recreate
    the original Arrow data. For example, for tz-aware timestamp columns
    it will restore the timezone (Parquet only stores the UTC values without
    timezone), or columns with duration type will be restored from the int64
    Parquet column.
write_page_index : bool, default False
    Whether to write a page index in general for all columns.
    Writing statistics to the page index disables the old method of writing
    statistics to each data page header. The page index makes statistics-based
    filtering more efficient than the page header, as it gathers all the
    statistics for a Parquet file in a single place, avoiding scattered I/O.
    Note that the page index is not yet used on the read side by PyArrow.
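As a minimal illustration (assuming a ``table`` such as the one created in the
examples below, with 'n_legs' and 'animal' columns), several of these options
accept per-column dicts keyed by column name:

>>> import pyarrow.parquet as pq
>>> pq.write_table(table, 'example_per_column.parquet',
...                use_dictionary=['animal'],
...                compression={'animal': 'zstd', 'n_legs': 'snappy'},
...                compression_level={'animal': 7})  # hypothetical path and settings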
at  Generate an example PyArrow Table and RecordBatch:

>>> import pyarrow as pa
>>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
...                              "Brittle stars", "Centipede"]})
>>> batch = pa.record_batch([[2, 2, 4, 4, 5, 100],
...                         ["Flamingo", "Parrot", "Dog", "Horse",
...                          "Brittle stars", "Centipede"]],
...                         names=['n_legs', 'animal'])

Create a ParquetWriter object:

>>> import pyarrow.parquet as pq
>>> writer = pq.ParquetWriter('example.parquet', table.schema)

and write the Table into the Parquet file:

>>> writer.write_table(table)
>>> writer.close()

>>> pq.read_table('example.parquet').to_pandas()
   n_legs         animal
0       2       Flamingo
1       2         Parrot
2       4            Dog
3       4          Horse
4       5  Brittle stars
5     100      Centipede

Create a ParquetWriter object for the RecordBatch:

>>> writer2 = pq.ParquetWriter('example2.parquet', batch.schema)

and write the RecordBatch into the Parquet file:

>>> writer2.write_batch(batch)
>>> writer2.close()

>>> pq.read_table('example2.parquet').to_pandas()
   n_legs         animal
0       2       Flamingo
1       2         Parrot
2       4            Dog
3       4          Horse
4       5  Brittle stars
5     100      Centipede
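``ParquetWriter`` also works as a context manager, which closes the file on
exit. An illustrative sketch reusing ``table`` from above ('example3.parquet'
is a hypothetical file name):

>>> with pq.ParquetWriter('example3.parquet', table.schema) as writer:
...     writer.write_table(table)
>>> pq.read_table('example3.parquet').num_rows
6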
c                   @   s`   e Zd ZdeeZddd	Zd
d Zdd Z	dd Z
dddZdddZdddZdd ZdS )ParquetWritera  
Class for incrementally building a Parquet file for Arrow tables.

Parameters
----------
where : path or file-like object
schema : pyarrow.Schema
{}
writer_engine_version : unused
**options : dict
    If options contains a key `metadata_collector` then the
    corresponding value is assumed to be a list (or any object with
    `.append` method) that will be filled with the file metadata instance
    of the written file.

Examples
--------
{}
N2.6TsnappyF1.0c                 K   s  |	d kr"|d k	rd|krd}	nd}	|| _ |d k	rBt||\}| _nd| _|| _|| _d | _t||dd\}}|d k	rt|tj	r|
|d }| _q|j|d d }| _n|}|dd | _d}tj||f|||||	|
||||||||||d	|| _d| _d S )
Nr   TF)Zallow_legacy_filesystemwb)compressionmetadata_collectorZV2)versionr   use_dictionarywrite_statisticsuse_deprecated_int96_timestampscompression_leveluse_byte_stream_splitcolumn_encodingwriter_engine_versiondata_page_versionuse_compliant_nested_typeencryption_propertieswrite_batch_sizedictionary_pagesize_limitstore_schemawrite_page_index)r   r   r   r   wherefile_handler   r,   r(   r   rl   open_output_streampop_metadata_collector_parquetr   writeris_open)ro   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   optionsr$   ZsinkZengine_versionr%   r%   r&   rq     sd      
  zParquetWriter.__init__c                 C   s   t | ddr|   d S )Nr   F)rh   ru   rr   r%   r%   r&   __del__  s    zParquetWriter.__del__c                 C   s   | S r   r%   rr   r%   r%   r&   rs     s    zParquetWriter.__enter__c                 O   s   |    dS NFrt   rv   r%   r%   r&   ry     s    zParquetWriter.__exit__c                 C   sD   t |tjr| || n&t |tjr4| || ntt|dS )a{  
        Write RecordBatch or Table to the Parquet file.

        Parameters
        ----------
        table_or_batch : {RecordBatch, Table}
        row_group_size : int, default None
            Maximum number of rows in each written row group. If None,
            the row group size will be the minimum of the input
            table or batch length and 1024 * 1024.
        N)r,   r   ZRecordBatchwrite_batchr   write_table	TypeErrorr   )ro   Ztable_or_batchrow_group_sizer%   r%   r&   write  s
    zParquetWriter.writec                 C   s"   t j|g|j}| || dS )a  
        Write RecordBatch to the Parquet file.

        Parameters
        ----------
        batch : RecordBatch
        row_group_size : int, default None
            Maximum number of rows in written row group. If None, the
            row group size will be the minimum of the RecordBatch
            size and 1024 * 1024.  If set larger than 64Mi then 64Mi
            will be used instead.
        N)r   r   Zfrom_batchesr   r   )ro   batchr   r   r%   r%   r&   r   ,  s    zParquetWriter.write_batchc                 C   s^   | j rt|| j| j}| js t|jj| jddsJd|j| j}t|| j	j
||d dS )a  
        Write Table to the Parquet file.

        Parameters
        ----------
        table : Table
        row_group_size : int, default None
            Maximum number of rows in each written row group. If None,
            the row group size will be the minimum of the Table size
            and 1024 * 1024.  If set larger than 64Mi then 64Mi will
            be used instead.

        FZcheck_metadatazTTable schema does not match schema used to create file: 
table:
{!s} vs. 
file:
{!s}r   N)r   r   r   r   r   AssertionErrorequalsrO   r:   r   r   )ro   r   r   msgr%   r%   r&   r   <  s    
 zParquetWriter.write_tablec                 C   sH   | j r0| j  d| _ | jdk	r0| j| jj | jdk	rD| j  dS )z;
        Close the connection to the Parquet file.
        FN)r   r   ru   r   rV   r]   r   rr   r%   r%   r&   ru   V  s    


zParquetWriter.close)NNr   Tr   TNNFNNr   TNNNTF)N)N)N)r   r   r   rO   _parquet_writer_arg_docs_parquet_writer_example_docr   rq   r   rs   ry   r   r   r   ru   r%   r%   r%   r&   r     s<                      
N


r   c                 C   s   t | d dd S )Nr   utf8r   )jsonloadsdecode)	keyvaluesr%   r%   r&   r   c  s    r   c                   @   s   e Zd ZdZeedddddfddZeeedddddfddZd	d
 Z	dd Z
dd Zdd Zdd Zdd ZdddZdS )ParquetDatasetPiecea  
    DEPRECATED: A single chunk of a potentially larger Parquet dataset to read.

    The arguments will indicate to read either a single row group or all row
    groups, and whether to add partition keys to the resulting pyarrow.Table.

    .. deprecated:: 5.0
        Directly constructing a ``ParquetDatasetPiece`` is deprecated, as well
        as accessing the pieces of a ``ParquetDataset`` object. Specify
        ``use_legacy_dataset=False`` when constructing the ``ParquetDataset``
        and use the ``ParquetDataset.fragments`` attribute instead.

    Parameters
    ----------
    path : str or pathlib.Path
        Path to file in the file system where this piece is located.
    open_file_func : callable
        Function to use for obtaining file handle to dataset piece.
    file_options : dict
        Options
    row_group : int, default None
        Row group to load. By default, reads all row groups.
    partition_keys : list of tuples
        Two-element tuples of ``(column name, ordinal index)``.
    rbmodeNc                 C   s&   t jdtdd | ||||| d S )Nz^ParquetDatasetPiece is deprecated as of pyarrow 5.0.0 and will be removed in a future version.   
stacklevel)warningswarnFutureWarning_initro   r$   open_file_funcfile_options	row_grouppartition_keysr%   r%   r&   rq     s         zParquetDatasetPiece.__init__c                 C   s    t t }|| |||| |S r   )r   __new__r   )r$   r   r   r   r   ro   r%   r%   r&   _create  s    
    zParquetDatasetPiece._createc                 C   s.   t || _|| _|| _|pg | _|p&i | _d S r   )r   r$   r   r   r   r   r   r%   r%   r&   r     s
    

zParquetDatasetPiece._initc                 C   s2   t |tsdS | j|jko0| j|jko0| j|jkS r   )r,   r   r$   r   r   ro   otherr%   r%   r&   __eq__  s    


zParquetDatasetPiece.__eq__c                 C   s   d t| j| j| j| jS )Nz-{}({!r}, row_group={!r}, partition_keys={!r}))rO   r   r   r$   r   r   rr   r%   r%   r&   __repr__  s     zParquetDatasetPiece.__repr__c                 C   s^   d}t | jdkr6ddd | jD }|d|7 }|| j7 }| jd k	rZ|d| j7 }|S )N r   z, c                 s   s   | ]\}}d  ||V  qdS )z{}={}NrO   )r5   r   indexr%   r%   r&   r7     s   z.ParquetDatasetPiece.__str__.<locals>.<genexpr>zpartition[{}] z | row_group={})r3   r   r}   rO   r$   r   )ro   r   Zpartition_strr%   r%   r&   __str__  s    


zParquetDatasetPiece.__str__c              
   C   s&   |   }|jW  5 Q R  S Q R X dS )z
        Return the file's metadata.

        Returns
        -------
        metadata : FileMetaData
            The file's metadata
        N)rl   r]   )ro   Zparquetr%   r%   r&   get_metadata  s    	
z ParquetDatasetPiece.get_metadatac                 C   s.   |  | j}t|ts$t|f| j}d|_|S )z1
        Return instance of ParquetFile.
        T)r   r$   r,   r\   r   ri   )ro   rk   r%   r%   r&   rl     s
    
zParquetDatasetPiece.openTFc                 C   s   | j dk	r|  }n(|dk	r,t|f| j}nt| jf| j}t|||d}| jdk	rf|j| jf|}n|jf |}t	| j
dkr|dkrtdt| j
D ]F\}	\}
}tjt	||dd}|j|	 j}tj||}||
|}q|jdd |S )	a%  
        Read this piece as a pyarrow.Table.

        Parameters
        ----------
        columns : list of column names, default None
        use_threads : bool, default True
            Perform multi-threaded column reads.
        partitions : ParquetPartitions, default None
        file : file-like object
            Passed to ParquetFile.
        use_pandas_metadata : bool
            If pandas metadata should be used or not.

        Returns
        -------
        table : pyarrow.Table
            The piece as a pyarrow.Table.
        Nr   r   r   r   zMust pass partition setsi4)ZdtypeTr   )r   rl   r\   r   r$   r   r   r   r   r3   r   r:   r|   npfulllevels
dictionaryr   ZDictionaryArrayr   append_columnru   )ro   r   r   
partitionsfiler   rk   r   r   r   r   r  r   r
  Zarrr%   r%   r&   r     s,    


zParquetDatasetPiece.read)NTNNF)r   r   r   r   r   rl   rq   staticmethodr   r   r   r   r  r  r   r%   r%   r%   r&   r   m  s,   
  
	
      r   c                   @   s:   e Zd ZdZdddZdd Zedd Zed	d
 ZdS )PartitionSeta  
    A data structure for cataloguing the observed Parquet partitions at a
    particular level. So if we have

    /foo=a/bar=0
    /foo=a/bar=1
    /foo=a/bar=2
    /foo=b/bar=0
    /foo=b/bar=1
    /foo=b/bar=2

    Then we have two partition sets, one for foo, another for bar. As we visit
    levels of the partition hierarchy, a PartitionSet tracks the distinct
    values and assigns categorical codes to use when reading the pieces

    Parameters
    ----------
    name : str
        Name of the partition set. Under which key to collect all values.
    keys : list
        All possible values that have been collected for that partition set.
    Nc                 C   s0   || _ |pg | _dd t| jD | _d | _d S )Nc                 S   s   i | ]\}}||qS r%   r%   )r5   r   kr%   r%   r&   
<dictcomp>7  s      z)PartitionSet.__init__.<locals>.<dictcomp>)r   keysr|   key_indices_dictionary)ro   r   r  r%   r%   r&   rq   4  s    
zPartitionSet.__init__c                 C   s<   || j kr| j | S t| j }| j| || j |< |S dS )z
        Get the index of the partition value if it is known, otherwise assign
        one

        Parameters
        ----------
        key : str or int
            The value for which we want to know the index.
        N)r  r3   r  rV   )ro   r   r  r%   r%   r&   	get_index:  s    




zPartitionSet.get_indexc                 C   sp   | j d k	r| j S t| jdkr&tdzdd | jD }t|}W n  tk
rd   t| j}Y nX || _ |S )Nr   zNo known partition keysc                 S   s   g | ]}t |qS r%   )intr5   xr%   r%   r&   rR   V  s     z+PartitionSet.dictionary.<locals>.<listcomp>)r  r3   r  r:   libarray)ro   Zinteger_keysr
  r%   r%   r&   r
  L  s    
zPartitionSet.dictionaryc                 C   s   t | jt| jkS r   )r;   r  sortedrr   r%   r%   r&   	is_sorted^  s    zPartitionSet.is_sorted)N)	r   r   r   r   rq   r  r   r
  r  r%   r%   r%   r&   r    s   

r  c                   @   sD   e Zd Zdd Zdd Zdd Zdd Zd	d
 Zdd Zdd Z	dS )ParquetPartitionsc                 C   s   g | _ t | _d S r   )r	  setpartition_namesrr   r%   r%   r&   rq   e  s    zParquetPartitions.__init__c                 C   s
   t | jS r   )r3   r	  rr   r%   r%   r&   __len__i  s    zParquetPartitions.__len__c                 C   s
   | j | S r   )r	  )ro   r   r%   r%   r&   __getitem__l  s    zParquetPartitions.__getitem__c                 C   s*   t |tstd| j|jko(| j|jkS )Nz0`other` must be an instance of ParquetPartitions)r,   r  r   r	  r  r   r%   r%   r&   r   o  s
    

zParquetPartitions.equalsc                 C   s*   z|  |W S  tk
r$   t Y S X d S r   r   r   NotImplementedr   r%   r%   r&   r   v  s    zParquetPartitions.__eq__c                 C   sV   |t | jkrF|| jkr&td|t|}| j| | j| | j| |S )ae  
        Record a partition value at a particular level, returning the distinct
        code for that value at that level.

        Examples
        --------

        partitions.get_index(1, 'foo', 'a') returns 0
        partitions.get_index(1, 'foo', 'b') returns 1
        partitions.get_index(1, 'foo', 'c') returns 2
        partitions.get_index(1, 'foo', 'a') returns 0

        Parameters
        ----------
        level : int
            The nesting level of the partition we are observing
        name : str
            The partition name
        key : str or int
            The partition value
        z1{} was the name of the partition in another level)	r3   r	  r  r:   rO   r  rV   addr  )ro   levelr   r   Zpart_setr%   r%   r&   r  |  s    
zParquetPartitions.get_indexc                 C   s\  |\}}|\}}}||krdS t |}	|dkrt|tsDtd|	j|sPtdtdd |D dkrptd|t tt|}	nt|t	st|trtd	||	| j
| j|  }
|d
ks|dkr|
|kS |dkr|
|kS |dkr|
|k S |dkr|
|kS |dkr|
|kS |dkr&|
|kS |dkr8|
|kS |dkrJ|
|kS td|d d S )NT>   rK   rL   z'%s' object is not a collectionz+Cannot use empty collection as filter valuec                 S   s   h | ]}t |qS r%   )r   )r5   itemr%   r%   r&   	<setcomp>  s     z=ParquetPartitions.filter_accepts_partition.<locals>.<setcomp>rz   z8All elements of the collection '%s' must be of same typez-Op '%s' not supported with a collection valuerD   rE   rF   rG   rH   rI   rJ   rK   rL   z+'%s' is not a valid operator in predicates.)r   r,   r   r   r   r:   r3   nextiterr/   r	  r
  Zas_py)ro   part_keyfilterr%  Zp_columnZp_value_indexZf_columnrB   Zf_valueZf_typeZp_valuer%   r%   r&   filter_accepts_partition  sZ    

  





z*ParquetPartitions.filter_accepts_partitionN)
r   r   r   rq   r   r!  r   r   r  r,  r%   r%   r%   r&   r  c  s   !r  c                   @   s>   e Zd ZdddZdd Zd	d
 Zdd Zdd Zdd ZdS )ParquetManifestN/hiverz   c                 C   s   t ||\}}|| _|| _|| _t|| _|| _t | _g | _	|| _
tj|d| _d | _d | _| d| jg  | j	jdd d | jd kr| j| _| j  d S )N)max_workersr   c                 S   s   | j S r   )r$   piecer%   r%   r&   <lambda>      z*ParquetManifest.__init__.<locals>.<lambda>)r   )r*   r   r   pathsepr   dirpathpartition_schemer  r  pieces_metadata_nthreadsr   ThreadPoolExecutor_thread_poolcommon_metadata_pathmetadata_path_visit_levelsortshutdown)ro   r6  r   r   r5  r7  metadata_nthreadsr%   r%   r&   rq     s&    

zParquetManifest.__init__c                    s   j }t| \}}}g }|D ]P}	j |	f}
|	drH|
_q"|	drZ|
_q"|	rhq"q"|	|
 q" fdd|D }|
  |
  t|dkrt|dkrtd n(t|dkr܈||| n|| d S )N_common_metadata	_metadatac                    s$   g | ]}t |sj |fqS r%   )_is_private_directoryr5  r}   r  	base_pathro   r%   r&   rR     s   z0ParquetManifest._visit_level.<locals>.<listcomp>r   z,Found files in an intermediate directory: {})r   r(  walkr5  r}   endswithr<  r=  _should_silently_excluderV   r?  r3   r:   rO   _visit_directories_push_pieces)ro   r%  rF  	part_keysfsr   directoriesfilesZfiltered_filesr$   	full_pathZfiltered_directoriesr%   rE  r&   r>    s0    


zParquetManifest._visit_levelc                 C   s0   | dp.| dp.|dp.|dp.|tkS )Nz.crcz	_$folder$r{   r   )rH  
startswithEXCLUDED_PARQUET_PATHS)ro   	file_namer%   r%   r&   rI    s    
z(ParquetManifest._should_silently_excludec                 C   s   g }|D ]~}t || j\}}t|\}}	| j|||	}
|||
fg }|| jk rt| j| j|d ||}|	| q| |d || q|rt
| d S Nrz   )_path_splitr5  _parse_hive_partitionr  r  r9  r;  submitr>  rV   r   wait)ro   r%  rN  rL  Zfutures_listr$   headtailr   r   r  Zdir_part_keysfuturer%   r%   r&   rJ    s     

z"ParquetManifest._visit_directoriesc                 C   s&   | j dkrt|S td| j d S )Nr/  zpartition schema: {})r7  rV  r=   rO   )ro   dirnamer%   r%   r&   _parse_partition(  s
    
z ParquetManifest._parse_partitionc                    s    j  fdd|D  d S )Nc                    s   g | ]}t j| jd qS ))r   r   )r   r   r   r5   r$   rL  ro   r%   r&   rR   0  s   z0ParquetManifest._push_pieces.<locals>.<listcomp>)r8  r   )ro   rO  rL  r%   r_  r&   rK  /  s    zParquetManifest._push_pieces)NNr.  r/  rz   )	r   r   r   rq   r>  rI  rJ  r]  rK  r%   r%   r%   r&   r-    s         
!r-  c                 C   s"   d| krt d| | ddS )NrD   z3Directory name did not appear to be a partition: {}rz   )r:   rO   split)valuer%   r%   r&   rV  7  s
    rV  c                 C   s,   t j| \}}|ds$|do*d|kS )Nr   r{   rD   )osr$   r`  rQ  )r  r   rZ  r%   r%   r&   rD  >  s    rD  c                 C   s:   |  |d }| d | | |d   }}||}||fS rT  )rfindrstrip)r$   sepr   rY  rZ  r%   r%   r&   rU  C  s    
rU  Z_SUCCESSc                   @   s   e Zd ZdZdS )_ParquetDatasetMetadata)rM  r`   r_   r^   ra   N)r   r   r   	__slots__r%   r%   r%   r&   rf  M  s   rf  c                 C   sD   | j d k	r(t| j tjs(| j j|dd}t||| j| j| j| j	dS )Nr   r   )r]   r`   r_   r^   ra   )
rM  r,   r(   r   rl   r\   r`   r_   r^   ra   )rT   r$   metar%   r%   r&   _open_dataset_fileR  s    
ri  z['{}' attribute is deprecated as of pyarrow 5.0.0 and will be removed in a future version.{}a  read_dictionary : list, default None
    List of names or column paths (for nested types) to read directly
    as DictionaryArray. Only supported for BYTE_ARRAY storage. To read
    a flat column as dictionary-encoded pass the column name. For
    nested types, you must pass the full column "path", which could be
    something like level1.level2.list.item. Refer to the Parquet
    file's schema to obtain the paths.
memory_map : bool, default False
    If the source is a file path, use a memory map to read file, which can
    improve performance in some environments.
buffer_size : int, default 0
    If positive, perform read buffering when deserializing individual
    column chunks. Otherwise IO calls are unbuffered.
partitioning : pyarrow.dataset.Partitioning or str or list of str, default "hive"
    The partitioning scheme for a partitioned dataset. The default of "hive"
    assumes directory names with key=value pairs like "/year=2009/month=11".
    In addition, a scheme like "/2009/11" is also supported, in which case
    you need to specify the field names or a full schema. See the
    ``pyarrow.dataset.partitioning()`` function for more details.a  Generate an example PyArrow Table and write it to a partitioned dataset:
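A brief illustrative sketch of these options, assuming an 'example.parquet'
file with an 'animal' string column and a hive-partitioned 'dataset_name/'
directory as written in the examples elsewhere in these docs:

>>> import pyarrow.parquet as pq
>>> table = pq.read_table('example.parquet', read_dictionary=['animal'])
>>> partitioned = pq.read_table('dataset_name/', partitioning='hive')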

>>> import pyarrow as pa
>>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
...                   'n_legs': [2, 2, 4, 4, 5, 100],
...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
...                              "Brittle stars", "Centipede"]})

>>> import pyarrow.parquet as pq
>>> pq.write_to_dataset(table, root_path='dataset_name',
...                     partition_cols=['year'],
...                     use_legacy_dataset=False)

Create a ParquetDataset object from the dataset source:

>>> dataset = pq.ParquetDataset('dataset_name/', use_legacy_dataset=False)

and read the data:

>>> dataset.read().to_pandas()
   n_legs         animal  year
0       5  Brittle stars  2019
1       2       Flamingo  2020
2       4            Dog  2021
3     100      Centipede  2021
4       2         Parrot  2022
5       4          Horse  2022

create a ParquetDataset object with filter:

>>> dataset = pq.ParquetDataset('dataset_name/', use_legacy_dataset=False,
...                             filters=[('n_legs','=',4)])
>>> dataset.read().to_pandas()
   n_legs animal  year
0       4    Dog  2021
1       4  Horse  2022
c                   @   s@  e Zd ZdeeeZd;ddZd<d	d
Z	dd Z
dd Zdd Zdd Zd=ddZdd Zdd Zdd Zedd Zedd Zedd  Zed!d" Zed#d$ Zed%d& Zeed'Zed(d) Zed*d+ Zed,d- Zed.d/ Zeed0Z ed1d2 Z!ed3d4 Z"ed5d6 Z#ed7d8 Z$ed9d: Z%dS )>ParquetDataseta"  
Encapsulates details of reading a complete Parquet dataset possibly
consisting of multiple files and partitions in subdirectories.

Parameters
----------
path_or_paths : str or List[str]
    A directory name, single file name, or list of file names.
filesystem : FileSystem, default None
    If nothing passed, will be inferred based on path.
    The path will first be looked for in the local on-disk filesystem;
    otherwise it will be parsed as a URI to determine the filesystem.
schema : pyarrow.parquet.Schema
    Use schema obtained elsewhere to validate file schemas. Alternative to
    metadata parameter.
metadata : pyarrow.parquet.FileMetaData
    Use metadata obtained elsewhere to validate file schemas.
split_row_groups : bool, default False
    Divide files into pieces for each row group in the file.
validate_schema : bool, default True
    Check that individual file schemas are all the same / compatible.
filters : pyarrow.compute.Expression or List[Tuple] or List[List[Tuple]], default None
    Rows which do not match the filter predicate will be removed from scanned
    data. Partition keys embedded in a nested directory structure will be
    exploited to avoid loading files at all if they contain no matching rows.
    If `use_legacy_dataset` is True, filters can only reference partition
    keys and only a hive-style directory structure is supported. When
    setting `use_legacy_dataset` to False, within-file filtering and
    different partitioning schemes are also supported.

    {1}
metadata_nthreads : int, default 1
    Number of threads to use in the thread pool that reads the dataset
    metadata. Increasing this is helpful when reading partitioned
    datasets.
{0}
use_legacy_dataset : bool, default False
    Set to False to enable the new code path (using the
    new Arrow Dataset API). Among other things, this allows passing
    `filters` for all columns and not only the partition keys, and enables
    different partitioning schemes, etc.
pre_buffer : bool, default True
    Coalesce and issue file reads in parallel to improve performance on
    high-latency filesystems (e.g. S3). If True, Arrow will use a
    background I/O thread pool. This option is only supported for
    use_legacy_dataset=False. If using a filesystem layer that itself
    performs readahead (e.g. fsspec's S3FS), disable readahead for best
    results.
coerce_int96_timestamp_unit : str, default None
    Cast timestamps that are stored in INT96 format to a particular resolution
    (e.g. 'ms'). Setting to None is equivalent to 'ns' and therefore INT96
    timestamps will be inferred as timestamps in nanoseconds.
thrift_string_size_limit : int, default None
    If not None, override the maximum total string size allocated
    when decoding Thrift structures. The default limit should be
    sufficient for most Parquet files.
thrift_container_size_limit : int, default None
    If not None, override the maximum total size of containers allocated
    when decoding Thrift structures. The default limit should be
    sufficient for most Parquet files.

Examples
--------
{2}
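An illustrative sketch of an ``Expression``-based filter, reusing the
'dataset_name/' directory from the examples above:

>>> import pyarrow.compute as pc
>>> dataset = pq.ParquetDataset('dataset_name/', use_legacy_dataset=False,
...                             filters=pc.field('n_legs') >= 4)
>>> filtered_table = dataset.read()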
[ParquetDataset.__new__, __init__, __getnewargs_ex__, equals, __eq__ and
validate_schemas appear here only as compiled bytecode; their source is not
recoverable from this dump. The readable fragments show that __new__ warns
that passing use_legacy_dataset=True is deprecated as of pyarrow 11.0.0, and
that the legacy path rejects Expression filters, split_row_groups and
non-hive partitioning. The docstring below belongs to ParquetDataset.read.]
        Read multiple Parquet files as a single pyarrow.Table.

        Parameters
        ----------
        columns : List[str]
            Names of columns to read from the file.
        use_threads : bool, default True
            Perform multi-threaded column reads.
        use_pandas_metadata : bool, default False
            Passed through to each dataset piece.

        Returns
        -------
        pyarrow.Table
            Content of the file as a table (of columns).

        Examples
        --------
        Generate an example dataset:

        >>> import pyarrow as pa
        >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
        ...                   'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_to_dataset(table, root_path='dataset_name_read',
        ...                     partition_cols=['year'],
        ...                     use_legacy_dataset=False)
        >>> dataset = pq.ParquetDataset('dataset_name_read/',
        ...                             use_legacy_dataset=False)

        Read multiple Parquet files as a single pyarrow.Table:

        >>> dataset.read(columns=["n_legs"])
        pyarrow.Table
        n_legs: int64
        ----
        n_legs: [[5],[2],[4,100],[2,4]]
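
        The ``use_pandas_metadata`` option is not shown above; a small sketch,
        assuming the same ``dataset`` (it only has an effect when the files
        carry pandas metadata):

        >>> df = dataset.read(use_pandas_metadata=True).to_pandas()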
rw  r   rx  rV   r  Zconcat_tables_get_common_pandas_metadatar   r]   replace_schema_metadata)
ro   r   r   r   Ztablesr2  r   Zall_datar^   Zcurrent_metadatar%   r%   r&   r     s$    *

 zParquetDataset.readc                 K   s   | j f ddi|S )a  
        Read dataset including pandas metadata, if any. Other arguments passed
        through to ParquetDataset.read, see docstring for further details.

        Parameters
        ----------
        **kwargs : optional
            All additional options to pass to the reader.

        Returns
        -------
        pyarrow.Table
            Content of the file as a table (of columns).

        Examples
        --------
        Generate an example PyArrow Table and write it to a partitioned
        dataset:

        >>> import pyarrow as pa
        >>> import pandas as pd
        >>> df = pd.DataFrame({'year': [2020, 2022, 2021, 2022, 2019, 2021],
        ...                    'n_legs': [2, 2, 4, 4, 5, 100],
        ...                    'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                    "Brittle stars", "Centipede"]})
        >>> table = pa.Table.from_pandas(df)
        >>> import pyarrow.parquet as pq
        >>> pq.write_table(table, 'table.parquet')
        >>> dataset = pq.ParquetDataset('table.parquet',
        ...                             use_legacy_dataset=False)

        Read dataset including pandas metadata:

        >>> dataset.read_pandas(columns=["n_legs"])
        pyarrow.Table
        n_legs: int64
        ----
        n_legs: [[2,2,4,4,5,100]]

        Select pandas metadata:

        >>> dataset.read_pandas(columns=["n_legs"]).schema.pandas_metadata
        {'index_columns': [{'kind': 'range', 'name': None, 'start': 0, ...}
[Compiled bytecode for ParquetDataset._get_common_pandas_metadata, _filter and
the deprecated accessors pieces, partitions, schema, memory_map,
read_dictionary, buffer_size, fs, metadata, metadata_path,
common_metadata_path and common_metadata; each deprecated accessor warns to
construct the ParquetDataset with use_legacy_dataset=False and use the new
attribute instead. The docstring below belongs to ParquetDataset.fragments.]
        A list of the Dataset source fragments or pieces with absolute
        file paths. To use this property set 'use_legacy_dataset=False'
        while constructing ParquetDataset object.

        Examples
        --------
        Generate an example dataset:

        >>> import pyarrow as pa
        >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
        ...                   'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_to_dataset(table, root_path='dataset_name_fragments',
        ...                     partition_cols=['year'],
        ...                     use_legacy_dataset=False)
        >>> dataset = pq.ParquetDataset('dataset_name_fragments/',
        ...                             use_legacy_dataset=False)

        List the fragments:

        >>> dataset.fragments
        [<pyarrow.dataset.ParquetFileFragment path=dataset_name_fragments/...
[ParquetDataset.fragments raises NotImplementedError ("To use this property
set 'use_legacy_dataset=False' while constructing the ParquetDataset"). The
docstring below belongs to ParquetDataset.files, which raises the same error.]
        A list of absolute Parquet file paths in the Dataset source.
        To use this property set 'use_legacy_dataset=False'
        while constructing ParquetDataset object.

        Examples
        --------
        Generate an example dataset:

        >>> import pyarrow as pa
        >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
        ...                   'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_to_dataset(table, root_path='dataset_name_files',
        ...                     partition_cols=['year'],
        ...                     use_legacy_dataset=False)
        >>> dataset = pq.ParquetDataset('dataset_name_files/',
        ...                             use_legacy_dataset=False)

        List the files:

        >>> dataset.files
        ['dataset_name_files/year=2019/...-0.parquet', ...
[Compiled bytecode; the docstring below belongs to ParquetDataset.filesystem.]
        The filesystem type of the Dataset source.
        To use this property set 'use_legacy_dataset=False'
        while constructing ParquetDataset object.
[Compiled bytecode; the docstring below belongs to ParquetDataset.partitioning.]
        The partitioning of the Dataset source, if discovered.
        To use this property set 'use_legacy_dataset=False'
        while constructing ParquetDataset object.
[The remainder of the legacy ParquetDataset class (its default-argument tables
and deprecated-attribute shims), the module helpers _make_manifest() and
_is_local_file_system(), and the definition of _ParquetDatasetV2 appear here
only as compiled bytecode. The docstring below belongs to _ParquetDatasetV2,
the Dataset-API-backed implementation used when use_legacy_dataset=False.]
    ParquetDataset shim using the Dataset API under the hood.

    Examples
    --------
    Generate an example PyArrow Table and write it to a partitioned dataset:

    >>> import pyarrow as pa
    >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
    ...                   'n_legs': [2, 2, 4, 4, 5, 100],
    ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
    ...                              "Brittle stars", "Centipede"]})
    >>> import pyarrow.parquet as pq
    >>> pq.write_to_dataset(table, root_path='dataset_v2',
    ...                     partition_cols=['year'],
    ...                     use_legacy_dataset=False)

    create a ParquetDataset object from the dataset source:

    >>> dataset = pq.ParquetDataset('dataset_v2/', use_legacy_dataset=False)

    and read the data:

    >>> dataset.read().to_pandas()
       n_legs         animal  year
    0       5  Brittle stars  2019
    1       2       Flamingo  2020
    2       4            Dog  2021
    3     100      Centipede  2021
    4       2         Parrot  2022
    5       4          Horse  2022

    create a ParquetDataset object with filter:

    >>> dataset = pq.ParquetDataset('dataset_v2/',
    ...                             filters=[('n_legs','=',4)],
    ...                             use_legacy_dataset=False)
    >>> dataset.read().to_pandas()
       n_legs animal  year
    0       4    Dog  2021
    1       4  Horse  2022
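
    ``filters`` also accepts a ``pyarrow.compute.Expression``; a minimal
    sketch reusing 'dataset_v2' from above:

    >>> import pyarrow.compute as pc
    >>> expr = (pc.field('n_legs') >= 4) & (pc.field('year') == 2021)
    >>> dataset = pq.ParquetDataset('dataset_v2/', filters=expr,
    ...                             use_legacy_dataset=False)
    >>> table = dataset.read()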
[_ParquetDatasetV2.__init__ (which maps these keywords onto pyarrow.dataset),
equals and __eq__ appear here only as compiled bytecode. The docstring below
belongs to the _ParquetDatasetV2.schema property.]
        Schema of the Dataset.

        Examples
        --------
        Generate an example dataset:

        >>> import pyarrow as pa
        >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
        ...                   'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_to_dataset(table, root_path='dataset_v2_schema',
        ...                     partition_cols=['year'],
        ...                     use_legacy_dataset=False)
        >>> dataset = pq.ParquetDataset('dataset_v2_schema/',
        ...                             use_legacy_dataset=False)

        Read the schema:

        >>> dataset.schema
        n_legs: int64
        animal: string
        year: dictionary<values=int32, indices=int32, ordered=0>
        )r  r   rr   r%   r%   r&   r   	  s    z_ParquetDatasetV2.schemac           	      C   s   | j jp
i }|r(d|kr(|  }|r(|}|dk	rn|rn|rnd|krndd t|D }t|tt|t|  }| jj|| j|d}|r|rd|kr|j jpi }|	d|d i |
|}|S )aR  
        Read (multiple) Parquet files as a single pyarrow.Table.

        Parameters
        ----------
        columns : List[str]
            Names of columns to read from the dataset. The partition fields
            are not automatically included (in contrast to when setting
            ``use_legacy_dataset=True``).
        use_threads : bool, default True
            Perform multi-threaded column reads.
        use_pandas_metadata : bool, default False
            If True and file has custom pandas schema metadata, ensure that
            index columns are also loaded.

        Returns
        -------
        pyarrow.Table
            Content of the file as a table (of columns).

        Examples
        --------
        Generate an example dataset:

        >>> import pyarrow as pa
        >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
        ...                   'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_to_dataset(table, root_path='dataset_v2_read',
        ...                     partition_cols=['year'],
        ...                     use_legacy_dataset=False)
        >>> dataset = pq.ParquetDataset('dataset_v2_read/',
        ...                             use_legacy_dataset=False)

        Read the dataset:

        >>> dataset.read(columns=["n_legs"])
        pyarrow.Table
        n_legs: int64
        ----
        n_legs: [[5],[2],[4,100],[2,4]]
        r   Nc                 S   s   g | ]}t |ts|qS r%   )r,   r   r5   rA   r%   r%   r&   rR   >
  s   
z*_ParquetDatasetV2.read.<locals>.<listcomp>)r   r+  r   )r   r]   r  r   r;   r  r  Zto_tabler  r  r  )	ro   r   r   r   r]   r^   r   r   Znew_metadatar%   r%   r&   r   
  s0    / 
z_ParquetDatasetV2.readc                 C   sf   | j s
d S d }dD ]N}tjt| j |}| j|}|jrt|| jd}|j	}|rd|kr qbq|S )N)rB  rC  r   r   )
r  rb  r$   r}   r/   r   r  r  r{  r]   )ro   r]   r   r=  r  Zpq_metar%   r%   r&   r  U
  s     z-_ParquetDatasetV2._get_common_pandas_metadatac                 K   s   | j f ddi|S )a  
        Read dataset including pandas metadata, if any. Other arguments passed
        through to ParquetDataset.read, see docstring for further details.

        Examples
        --------
        Generate an example parquet file:

        >>> import pyarrow as pa
        >>> import pandas as pd
        >>> df = pd.DataFrame({'year': [2020, 2022, 2021, 2022, 2019, 2021],
        ...                    'n_legs': [2, 2, 4, 4, 5, 100],
        ...                    'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                    "Brittle stars", "Centipede"]})
        >>> table = pa.Table.from_pandas(df)
        >>> import pyarrow.parquet as pq
        >>> pq.write_table(table, 'table_V2.parquet')
        >>> dataset = pq.ParquetDataset('table_V2.parquet',
        ...                             use_legacy_dataset=False)

        Read the dataset with pandas metadata:

        >>> dataset.read_pandas(columns=["n_legs"])
        pyarrow.Table
        n_legs: int64
        ----
        n_legs: [[2,2,4,4,5,100]]

        >>> dataset.read_pandas(columns=["n_legs"]).schema.pandas_metadata
        {'index_columns': [{'kind': 'range', 'name': None, 'start': 0, ...}
        r   Tr  r  r%   r%   r&   r  g
  s     z_ParquetDatasetV2.read_pandasc                 C   s&   t jtddtdd t| j S )Nr  z' Use the '.fragments' attribute insteadr   r   )r   r   r  rO   r   r;   r  get_fragmentsrr   r%   r%   r&   r8  
  s     z_ParquetDatasetV2.piecesc                 C   s   t | j S )a  
        A list of the Dataset source fragments or pieces with absolute
        file paths.

        Examples
        --------
        Generate an example dataset:

        >>> import pyarrow as pa
        >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
        ...                   'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_to_dataset(table, root_path='dataset_v2_fragments',
        ...                     partition_cols=['year'],
        ...                     use_legacy_dataset=False)
        >>> dataset = pq.ParquetDataset('dataset_v2_fragments/',
        ...                             use_legacy_dataset=False)

        List the fragments:

        >>> dataset.fragments
        [<pyarrow.dataset.ParquetFileFragment path=dataset_v2_fragments/...
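
        A small sketch of walking the fragments; ``count_rows`` is a
        ``pyarrow.dataset.Fragment`` method:

        >>> sizes = [(f.path, f.count_rows()) for f in dataset.fragments]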
[Compiled bytecode; the docstring below belongs to _ParquetDatasetV2.files.]
        A list of absolute Parquet file paths in the Dataset source.

        Examples
        --------
        Generate an example dataset:

        >>> import pyarrow as pa
        >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
        ...                   'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_to_dataset(table, root_path='dataset_v2_files',
        ...                     partition_cols=['year'],
        ...                     use_legacy_dataset=False)
        >>> dataset = pq.ParquetDataset('dataset_v2_files/',
        ...                             use_legacy_dataset=False)

        List the files:

        >>> dataset.files
        ['dataset_v2_files/year=2019/...-0.parquet', ...
        )r  rO  rr   r%   r%   r&   rO  
  s    z_ParquetDatasetV2.filesc                 C   s   | j jS )z<
        The filesystem type of the Dataset source.
        )r  r   rr   r%   r%   r&   r   
  s    z_ParquetDatasetV2.filesystemc                 C   s   | j jS )zH
        The partitioning of the Dataset source, if discovered.
        )r  rk  rr   r%   r%   r&   rk  
  s    z_ParquetDatasetV2.partitioning)N)NTF)r   r   r   r   rq   r   r   r   r   r   r  r  r8  r  rO  r   rk  r%   r%   r%   r&   rn  =	  s>   +      f

S"



rn  a  
{0}

Parameters
----------
source : str, pyarrow.NativeFile, or file-like object
    If a string passed, can be a single file name or directory name. For
    file-like objects, only read a single file. Use pyarrow.BufferReader to
    read a file contained in a bytes or buffer-like object.
columns : list
    If not None, only these columns will be read from the file. A column
    name may be a prefix of a nested field, e.g. 'a' will select 'a.b',
    'a.c', and 'a.d.e'. If empty, no columns will be read. Note
    that the table will still have the correct num_rows set despite having
    no columns.
use_threads : bool, default True
    Perform multi-threaded column reads.
metadata : FileMetaData
    If separately computed
schema : Schema, optional
    Optionally provide the Schema for the parquet dataset, in which case it
    will not be inferred from the source.
{1}
filesystem : FileSystem, default None
    If nothing passed, will be inferred based on path.
    Path will try to be found in the local on-disk filesystem otherwise
    it will be parsed as a URI to determine the filesystem.
filters : pyarrow.compute.Expression or List[Tuple] or List[List[Tuple]], default None
    Rows which do not match the filter predicate will be removed from scanned
    data. Partition keys embedded in a nested directory structure will be
    exploited to avoid loading files at all if they contain no matching rows.
    If `use_legacy_dataset` is True, filters can only reference partition
    keys and only a hive-style directory structure is supported. When
    setting `use_legacy_dataset` to False, filtering within files and
    different partitioning schemes are also supported.

    {3}
use_legacy_dataset : bool, default False
    By default, `read_table` uses the new Arrow Datasets API since
    pyarrow 1.0.0. Among other things, this allows passing `filters`
    for all columns and not only the partition keys, enables
    different partitioning schemes, etc.
    Set to True to use the legacy behaviour (this option is deprecated,
    and the legacy implementation will be removed in a future version).
ignore_prefixes : list, optional
    Files matching any of these prefixes will be ignored by the
    discovery process if use_legacy_dataset=False.
    This is matched to the basename of a path.
    By default this is ['.', '_'].
    Note that discovery happens only if a directory is passed as source.
pre_buffer : bool, default True
    Coalesce and issue file reads in parallel to improve performance on
    high-latency filesystems (e.g. S3). If True, Arrow will use a
    background I/O thread pool. This option is only supported for
    use_legacy_dataset=False. If using a filesystem layer that itself
    performs readahead (e.g. fsspec's S3FS), disable readahead for best
    results.
coerce_int96_timestamp_unit : str, default None
    Cast timestamps that are stored in INT96 format to a particular
    resolution (e.g. 'ms'). Setting to None is equivalent to 'ns'
    and therefore INT96 timestamps will be inferred as timestamps
    in nanoseconds.
decryption_properties : FileDecryptionProperties or None
    File-level decryption properties.
    The decryption properties can be created using
    ``CryptoFactory.file_decryption_properties()``.
thrift_string_size_limit : int, default None
    If not None, override the maximum total string size allocated
    when decoding Thrift structures. The default limit should be
    sufficient for most Parquet files.
thrift_container_size_limit : int, default None
    If not None, override the maximum total size of containers allocated
    when decoding Thrift structures. The default limit should be
    sufficient for most Parquet files.

Returns
-------
{2}

{4}
ad  
Examples
--------

Generate an example PyArrow Table and write it to a partitioned dataset:

>>> import pyarrow as pa
>>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
...                   'n_legs': [2, 2, 4, 4, 5, 100],
...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
...                              "Brittle stars", "Centipede"]})
>>> import pyarrow.parquet as pq
>>> pq.write_to_dataset(table, root_path='dataset_name_2',
...                     partition_cols=['year'])

Read the data:

>>> pq.read_table('dataset_name_2').to_pandas()
   n_legs         animal  year
0       5  Brittle stars  2019
1       2       Flamingo  2020
2       4            Dog  2021
3     100      Centipede  2021
4       2         Parrot  2022
5       4          Horse  2022


Read only a subset of columns:

>>> pq.read_table('dataset_name_2', columns=["n_legs", "animal"])
pyarrow.Table
n_legs: int64
animal: string
----
n_legs: [[5],[2],[4,100],[2,4]]
animal: [["Brittle stars"],["Flamingo"],["Dog","Centipede"],["Parrot","Horse"]]

Read a subset of columns and read one column as DictionaryArray:

>>> pq.read_table('dataset_name_2', columns=["n_legs", "animal"],
...               read_dictionary=["animal"])
pyarrow.Table
n_legs: int64
animal: dictionary<values=string, indices=int32, ordered=0>
----
n_legs: [[5],[2],[4,100],[2,4]]
animal: [  -- dictionary:
["Brittle stars"]  -- indices:
[0],  -- dictionary:
["Flamingo"]  -- indices:
[0],  -- dictionary:
["Dog","Centipede"]  -- indices:
[0,1],  -- dictionary:
["Parrot","Horse"]  -- indices:
[0,1]]

Read the table with filter:

>>> pq.read_table('dataset_name_2', columns=["n_legs", "animal"],
...               filters=[('n_legs','<',4)]).to_pandas()
   n_legs    animal
0       2  Flamingo
1       2    Parrot

Read data from a single Parquet file:

>>> pq.write_table(table, 'example.parquet')
>>> pq.read_table('example.parquet').to_pandas()
   year  n_legs         animal
0  2020       2       Flamingo
1  2022       2         Parrot
2  2021       4            Dog
3  2022       4          Horse
4  2019       5  Brittle stars
5  2021     100      Centipede
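
A hedged sketch of reading over a high-latency filesystem, as discussed for
the `pre_buffer` option; the bucket name and region below are hypothetical:

import pyarrow.parquet as pq
from pyarrow import fs

s3 = fs.S3FileSystem(region='us-east-1')
# pre_buffer=True is already the default; spelled out here for illustration.
table = pq.read_table('my-bucket/dataset_name_2', filesystem=s3,
                      pre_buffer=True)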
[read_table(), read_pandas() and write_table() appear here only as compiled
bytecode. The readable fragments show how their docstrings are assembled from
the template above: read_table's summary is "Read a Table from Parquet format"
with a note that starting with pyarrow 1.0 the default for
`use_legacy_dataset` switched to False; it documents use_pandas_metadata
(load pandas index columns if present) and returns "pyarrow.Table -- Content
of the file as a table (of columns)". read_pandas(source, columns=None,
**kwargs) appears to forward to read_table with use_pandas_metadata=True,
returning the table including DataFrame index columns. The block below is the
module's write_table example.]
r   r   r   r   	Exceptionr   rb  r  r   error)r   r   r   r   r   r   r   r   r  r  r  r   r   r   r   r   r   r   r   r   r   r   r   rx   Z	use_int96r   r%   r%   r&   r     sN     r   a  Generate an example PyArrow Table:

>>> import pyarrow as pa
>>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
...                              "Brittle stars", "Centipede"]})

and write the Table into Parquet file:

>>> import pyarrow.parquet as pq
>>> pq.write_table(table, 'example.parquet')

Defining row group size for the Parquet file:

>>> pq.write_table(table, 'example.parquet', row_group_size=3)

Defining row group compression (default is Snappy):

>>> pq.write_table(table, 'example.parquet', compression='none')

Defining row group compression and encoding per-column:

>>> pq.write_table(table, 'example.parquet',
...                compression={'n_legs': 'snappy', 'animal': 'gzip'},
...                use_dictionary=['n_legs', 'animal'])

Defining column encoding per-column:

>>> pq.write_table(table, 'example.parquet',
...                column_encoding={'animal':'PLAIN'},
...                use_dictionary=False)
ap  
Write a Table to Parquet format.

Parameters
----------
table : pyarrow.Table
where : string or pyarrow.NativeFile
row_group_size : int
    Maximum number of rows in each written row group. If None, the
    row group size will be the minimum of the Table size and
    1024 * 1024.
{}
**kwargs : optional
    Additional options for ParquetWriter

Examples
--------
{}
c                 C   sH   |   rD| |sDz| | W n" tk
rB   | |s>tY nX d S r   )Z_isfilestoreexistsmkdirr  r   )rM  r$   r%   r%   r&   _mkdir_if_not_existsw  s
    r  c           &         s  |dkr|rd}nd}d}d}|r&|n|}|dk	rL|dk	rLt |dd||dk	rn|dk	rnt |dd	||d
ddk	r|
dk	rt |dd
||sddlm} t }t|jj	D ]}||kr||||< q|d|dd|d< d}dk	rfdd}
|dk	r(t |d|
 }|jf |}|dk	rNt|}|rn| |j}|j|dd}|dkrt d }|dkrd}|j| |f||||||	|
||d	| dS |rtjdtdd d}|dk	rt |d|dk	rt |d|	dk	rt |d|
dk	r6t |d|dk	rNt |d|dk	rft |d|dk	rtjtddtdd t||\}}t|| |dk	r4t|dkr4|    fd d!|D } j|d"d#} j|}t|dkrt d$| j}| jjD ] }||kr|||}qt|d%krJ|d }|j|dd&D ]\}}t |t!sr|f}d'"d(d! t#||D } t$j%j&||dd)}!t|d'"|| g |r||}"n
t d* }"d'"| |"g}#d'"||#g}$|'|$d+}%t(|!|%fd
i| W 5 Q R X dk	rXd, )|# qXnn|rD|d}"n
t d* }"d'"||"g}$|'|$d+}%t(| |%fd
i| W 5 Q R X dk	rd, )|" dS )-a  Wrapper around dataset.write_dataset (when use_legacy_dataset=False) or
    parquet.write_table (when use_legacy_dataset=True) for writing a Table to
    Parquet format by partitions.
    For each combination of partition columns and values,
    a subdirectories are created in the following
    manner:

    root_dir/
      group1=value1
        group2=value1
          <uuid>.parquet
        group2=value2
          <uuid>.parquet
      group1=valueN
        group2=value1
          <uuid>.parquet
        group2=valueN
          <uuid>.parquet

    Parameters
    ----------
    table : pyarrow.Table
    root_path : str, pathlib.Path
        The root directory of the dataset
    partition_cols : list,
        Column names by which to partition the dataset.
        Columns are partitioned in the order they are given
    partition_filename_cb : callable,
        A callback function that takes the partition key(s) as an argument
        and allows you to override the partition filename. If nothing is
        passed, the filename will consist of a uuid.
        This option is only supported for use_legacy_dataset=True.
        When use_legacy_dataset=None and this option is specified,
        use_legacy_dataset will be set to True.
    filesystem : FileSystem, default None
        If nothing passed, will be inferred based on path.
        Path will try to be found in the local on-disk filesystem otherwise
        it will be parsed as a URI to determine the filesystem.
    use_legacy_dataset : bool
        Default is False. Set to True to use the legacy behaviour
        (this option is deprecated, and the legacy implementation will be
        removed in a future version). The legacy implementation still
        supports the `partition_filename_cb` keyword but is less efficient
        when using partition columns.
    schema : Schema, optional
        This option is only supported for use_legacy_dataset=False.
    partitioning : Partitioning or list[str], optional
        The partitioning scheme specified with the
        ``pyarrow.dataset.partitioning()`` function or a list of field names.
        When providing a list of field names, you can use
        ``partitioning_flavor`` to drive which partitioning type should be
        used.
        This option is only supported for use_legacy_dataset=False.
    basename_template : str, optional
        A template string used to generate basenames of written data files.
        The token '{i}' will be replaced with an automatically incremented
        integer. If not specified, it defaults to "guid-{i}.parquet".
        This option is only supported for use_legacy_dataset=False.
    use_threads : bool, default True
        Write files in parallel. If enabled, then maximum parallelism will be
        used, as determined by the number of available CPU cores.
        This option is only supported for use_legacy_dataset=False.
    file_visitor : function
        If set, this function will be called with a WrittenFile instance
        for each file created during the call.  This object will have both
        a path attribute and a metadata attribute.

        The path attribute will be a string containing the path to
        the created file.

        The metadata attribute will be the parquet metadata of the file.
        This metadata will have the file path attribute set and can be used
        to build a _metadata file.  The metadata attribute will be None if
        the format is not parquet.

        Example visitor which simply collects the filenames created::

            visited_paths = []

            def file_visitor(written_file):
                visited_paths.append(written_file.path)

        This option is only supported for use_legacy_dataset=False.
    existing_data_behavior : 'overwrite_or_ignore' | 'error' | 'delete_matching'
        Controls how the dataset will handle data that already exists in
        the destination. The default behaviour is 'overwrite_or_ignore'.

        'overwrite_or_ignore' will ignore any existing data and will
        overwrite files with the same name as an output file.  Other
        existing files will be ignored.  This behavior, in combination
        with a unique basename_template for each write, will allow for
        an append workflow.

        'error' will raise an error if any data exists in the destination.

        'delete_matching' is useful when you are writing a partitioned
        dataset.  The first time each partition directory is encountered
        the entire directory will be deleted.  This allows you to overwrite
        old partitions completely.
        This option is only supported for use_legacy_dataset=False.
    **kwargs : dict,
        When use_legacy_dataset=False, used as additional kwargs for
        `dataset.write_dataset` function for matching kwargs, and remainder to
        `ParquetFileFormat.make_write_options`. See the docstring
        of `write_table` and `dataset.write_dataset` for the available options.
        When use_legacy_dataset=True, used as additional kwargs for
        `parquet.write_table` function (See docstring for `write_table`
        or `ParquetWriter` for more information).
        Using `metadata_collector` in kwargs allows one to collect the
        file metadata instances of dataset pieces. The file paths in the
        ColumnChunkMetaData will be set relative to `root_path`.

    Examples
    --------
    Generate an example PyArrow Table:

    >>> import pyarrow as pa
    >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
    ...                   'n_legs': [2, 2, 4, 4, 5, 100],
    ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
    ...                              "Brittle stars", "Centipede"]})

    and write it to a partitioned dataset:

    >>> import pyarrow.parquet as pq
    >>> pq.write_to_dataset(table, root_path='dataset_name_3',
    ...                     partition_cols=['year'])
    >>> pq.ParquetDataset('dataset_name_3', use_legacy_dataset=False).files
    ['dataset_name_3/year=2019/...-0.parquet', ...

    Write a single Parquet file into the root folder:

    >>> pq.write_to_dataset(table, root_path='dataset_name_4')
    >>> pq.ParquetDataset('dataset_name_4/', use_legacy_dataset=False).files
    ['dataset_name_4/...-0.parquet']
    NTFzVThe '{0}' argument is not supported by use_legacy_dataset={2}. Use only '{1}' instead.zVThe '{1}' argument is not supported by use_legacy_dataset={2}. Use only '{0}' instead.basename_templatepartition_filename_cbrk  partition_colsr   file_visitorr   r   r  Zmax_rows_per_groupzGThe '{}' argument is not supported with the new dataset implementation.c                    s     | j d S r   )rV   r]   )Zwritten_file)r   r%   r&   r  F  s    z&write_to_dataset.<locals>.file_visitorr/  )r   z-{i}.parquetZoverwrite_or_ignore)	r   rO   r   r   rk  r   r  r  existing_data_behaviorr  r   r   zThe '{}' argument is not supported with the legacy implementation. To use this argument specify 'use_legacy_dataset=False' while constructing the ParquetDataset.r   r   r  z Specify 'use_legacy_dataset=False' while constructing the ParquetDataset, and then use the 'basename_template' parameter instead. For usage see `pyarrow.dataset.write_dataset`c                    s   g | ]} | qS r%   r%   r  )dfr%   r&   rR     s     z$write_to_dataset.<locals>.<listcomp>r   )Zaxisz.No data left to save outside partition columnsrz   )Zobservedr.  c                 S   s   g | ]\}}d j ||dqS )z{colname}={value})Zcolnamera  r  )r5   r   r0   r%   r%   r&   rR     s   )r   safez.parquetr   r  )*r:   rO   r   rS   rT   r   inspect	signatureZwrite_dataset
parametersr  Zmake_write_optionsr   selectr   rk  r   r   r   r   r  r(   r)   r  r3   Z	to_pandasZdropr   namesr  r  groupbyr,   tupler}   zipr   r   Zfrom_pandasrl   r   Zset_file_path)&r   	root_pathr  r  r   rr  r   rk  r  r   r  r  rx   Zmsg_confl_0Zmsg_confl_1Z	msg_conflrP   Zwrite_dataset_kwargsr   r   r  Zwrite_optionsZpart_schemaZmsg2rM  r   Zdata_dfZ	data_colsZ	subschemarA   r  ZsubgroupsubdirZsubtableoutfilerelative_pathrP  r6   r%   )r  r   r&   write_to_dataset  s      






     







 	








r  c           
   	   K   s   t ||\}}t|dr | }t|| |f|}|  |dk	rt||d}t|dr`|| |D ]}|| qd|dk	r||}	|	|	 W 5 Q R X n
|	| dS )a&  
    Write metadata-only Parquet file from schema. This can be used with
    `write_to_dataset` to generate `_common_metadata` and `_metadata` sidecar
    files.

    Parameters
    ----------
    schema : pyarrow.Schema
    where : string or pyarrow.NativeFile
    metadata_collector : list
        where to collect metadata information.
    filesystem : FileSystem, default None
        If nothing passed, will be inferred from `where` if path-like, else
        `where` is already a file-like object so no filesystem is needed.
    **kwargs : dict,
        Additional kwargs for ParquetWriter class. See docstring for
        `ParquetWriter` for more information.

    Examples
    --------
    Generate example data:

    >>> import pyarrow as pa
    >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
    ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
    ...                              "Brittle stars", "Centipede"]})

    Write a dataset and collect metadata information.

    >>> metadata_collector = []
    >>> import pyarrow.parquet as pq
    >>> pq.write_to_dataset(
    ...     table, 'dataset_metadata',
    ...      metadata_collector=metadata_collector)

    Write the `_common_metadata` parquet file without row groups statistics.

    >>> pq.write_metadata(
    ...     table.schema, 'dataset_metadata/_common_metadata')

    Write the `_metadata` parquet file with row groups statistics.

    >>> pq.write_metadata(
    ...     table.schema, 'dataset_metadata/_metadata',
    ...     metadata_collector=metadata_collector)
    seekNr   )
r   r}  tellr   ru   r{  r  Zappend_row_groupsr   Zwrite_metadata_file)
r   r   r   r   rx   Zcursor_positionr   r]   mr6   r%   r%   r&   write_metadata  s    0


r  c              
   C   sZ   t | |\}} t }|dk	r*||  }} |" t| ||d}|jW  5 Q R  S Q R X dS )a  
    Read FileMetaData from footer of a single Parquet file.

    Parameters
    ----------
    where : str (file path) or file-like object
    memory_map : bool, default False
        Create memory map when the source is a file path.
    decryption_properties : FileDecryptionProperties, default None
        Decryption properties for reading encrypted Parquet files.
    filesystem : FileSystem, default None
        If nothing passed, will be inferred based on path.
        Path will try to be found in the local on-disk filesystem otherwise
        it will be parsed as a URI to determine the filesystem.

    Returns
    -------
    metadata : FileMetaData
        The metadata of the Parquet file

    Examples
    --------
    >>> import pyarrow as pa
    >>> import pyarrow.parquet as pq
    >>> table = pa.table({'n_legs': [4, 5, 100],
    ...                   'animal': ["Dog", "Brittle stars", "Centipede"]})
    >>> pq.write_table(table, 'example.parquet')

    >>> pq.read_metadata('example.parquet')
    <pyarrow._parquet.FileMetaData object at ...>
      created_by: parquet-cpp-arrow version ...
      num_columns: 2
      num_rows: 3
      num_row_groups: 1
      format_version: 2.6
      serialized_size: ...
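
    The returned FileMetaData can be inspected per row group and column; a
    small sketch reusing 'example.parquet' from above:

    >>> meta = pq.read_metadata('example.parquet')
    >>> stats = meta.row_group(0).column(0).statistics  # column 0 is 'n_legs'
    >>> minimum, maximum = stats.min, stats.max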
    Nr`   rd   )r   r   rj   r\   r]   r   r`   rd   r   Zfile_ctxr  r%   r%   r&   r{    s    'r{  c              
   C   s^   t | |\}} t }|dk	r*||  }} |& t| ||d}|j W  5 Q R  S Q R X dS )a  
    Read effective Arrow schema from Parquet file metadata.

    Parameters
    ----------
    where : str (file path) or file-like object
    memory_map : bool, default False
        Create memory map when the source is a file path.
    decryption_properties : FileDecryptionProperties, default None
        Decryption properties for reading encrypted Parquet files.
    filesystem : FileSystem, default None
        If nothing passed, will be inferred based on path.
        Path will try to be found in the local on-disk filesystem otherwise
        it will be parsed as a URI to determine the filesystem.

    Returns
    -------
    schema : pyarrow.Schema
        The schema of the Parquet file

    Examples
    --------
    >>> import pyarrow as pa
    >>> import pyarrow.parquet as pq
    >>> table = pa.table({'n_legs': [4, 5, 100],
    ...                   'animal': ["Dog", "Brittle stars", "Centipede"]})
    >>> pq.write_table(table, 'example.parquet')

    >>> pq.read_schema('example.parquet')
    n_legs: int64
    animal: string
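
    A small sketch of checking a column type before reading, reusing
    'example.parquet' from above:

    >>> import pyarrow as pa
    >>> schema = pq.read_schema('example.parquet')
    >>> pa.types.is_int64(schema.field('n_legs').type)
    True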
[Module trailer: the __all__ export list and the compiled module's import and
line-number bookkeeping; nothing further is recoverable as source.]