"""Module containing a database to deal with packs"""
from gitdb.db.base import FileDBBase, ObjectDBR, CachingDB
from gitdb.util import LazyMixin
from gitdb.exc import BadObject, UnsupportedOperation, AmbiguousObjectName
from gitdb.pack import PackEntity

from functools import reduce

import os
import glob

__all__ = ('PackedDB', )


class PackedDB(FileDBBase, ObjectDBR, CachingDB, LazyMixin):

    """A database operating on a set of object packs"""

    # Resort the entity list by hit count every N queries, so that the most
    # frequently hit packs are searched first.
    _sort_interval = 500

    def __init__(self, root_path):
        super().__init__(root_path)
        # The lazily loaded self._entities is a list of
        # [hit_count, PackEntity, sha_to_index] items, one per pack on disk.
        self._hit_count = 0     # total number of successful lookups
        self._st_mtime = 0      # last modification time of our root path

    def _set_cache_(self, attr):
        if attr == '_entities':
            self._entities = list()
            self.update_cache(force=True)

    def _sort_entities(self):
        self._entities.sort(key=lambda l: l[0], reverse=True)

    def _pack_info(self, sha):
        """:return: tuple(entity, index) for an item at the given sha
        :param sha: 20 or 40 byte sha
        :raise BadObject:
        **Note:** This method is not thread-safe, but may be hit in multi-threaded
            operation. The worst thing that can happen though is a counter that
            was not incremented, or the list being in wrong order. So we save
            the time for locking here, let's see how that goes"""
        # Periodically resort so the most frequently hit packs come first.
        if self._hit_count % self._sort_interval == 0:
            self._sort_entities()

        for item in self._entities:
            index = item[2](sha)
            if index is not None:
                item[0] += 1            # one hit for this pack
                self._hit_count += 1    # general hit count
                return (item[1], index)

        raise BadObject(sha)

    #{ Object DB Read

    def has_object(self, sha):
        try:
            self._pack_info(sha)
            return True
        except BadObject:
            return False

    def info(self, sha):
        entity, index = self._pack_info(sha)
        return entity.info_at_index(index)

    def stream(self, sha):
        entity, index = self._pack_info(sha)
        return entity.stream_at_index(index)

    def sha_iter(self):
        for entity in self.entities():
            index = entity.index()
            sha_by_index = index.sha
            for i in range(index.size()):
                yield sha_by_index(i)

    def size(self):
        sizes = [item[1].index().size() for item in self._entities]
        return reduce(lambda x, y: x + y, sizes, 0)

    #} END object db read

    #{ Object DB Write

    def store(self, istream):
        """Storing individual objects is not feasible as a pack is designed to
        hold multiple objects. Writing or rewriting packs for single objects is
        inefficient"""
        raise UnsupportedOperation()

    #} END object db write

    #{ Interface

    def update_cache(self, force=False):
        """
        Update our cache with the packs that actually exist on disk. Add new ones,
        and remove deleted ones. We keep the unchanged ones.

        :param force: If True, the cache will be updated even though the directory
            does not appear to have changed according to its modification timestamp.
        :return: True if the packs have been updated so there is new information,
            False if there was no change to the pack database"""
        stat = os.stat(self.root_path())
        if not force and stat.st_mtime <= self._st_mtime:
            return False
        self._st_mtime = stat.st_mtime

        # Packs are prefixed with 'pack-' by git convention.
        pack_files = set(glob.glob(os.path.join(self.root_path(), "pack-*.pack")))
        our_pack_files = {item[1].pack().path() for item in self._entities}

        # New packs: seed the hit counter with the pack size, a reasonable
        # first guess for hit probability.
        for pack_file in (pack_files - our_pack_files):
            entity = PackEntity(pack_file)
            self._entities.append([entity.pack().size(), entity, entity.index().sha_to_index])

        # Removed packs: drop the matching entity.
        for pack_file in (our_pack_files - pack_files):
            del_index = -1
            for i, item in enumerate(self._entities):
                if item[1].pack().path() == pack_file:
                    del_index = i
                    break
            assert del_index != -1
            del(self._entities[del_index])

        # reinitialize priorities
        self._sort_entities()
        return True

    def entities(self):
        """:return: list of pack entities operated upon by this database"""
        return [item[1] for item in self._entities]

    def partial_to_complete_sha(self, partial_binsha, canonical_length):
        """:return: 20 byte sha as inferred by the given partial binary sha
        :param partial_binsha: binary sha with less than 20 bytes
        :param canonical_length: length of the corresponding canonical representation.
            It is required as binary shas cannot display whether the original hex sha
            had an odd or even number of characters
        :raise AmbiguousObjectName:
        :raise BadObject:"""
        candidate = None
        for item in self._entities:
            item_index = item[1].index().partial_sha_to_index(partial_binsha, canonical_length)
            if item_index is not None:
                sha = item[1].index().sha(item_index)
                if candidate and candidate != sha:
                    raise AmbiguousObjectName(partial_binsha)
                candidate = sha

        if candidate:
            return candidate
        raise BadObject(partial_binsha)

    #} END interface
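
# Minimal usage sketch (an assumption, not part of the original module): how this
# database is typically driven, using only the methods defined above. The pack
# directory path is hypothetical; any repository's ".git/objects/pack" would do.
#
#   from gitdb.db.pack import PackedDB
#
#   pdb = PackedDB("/path/to/repo/.git/objects/pack")
#   print(pdb.size())                   # total object count across all packs
#   for binsha in pdb.sha_iter():       # 20-byte binary shas
#       info = pdb.info(binsha)         # type and size, without object data
#       stream = pdb.stream(binsha)     # decompressing stream over the data
#       break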