11# Contains standalone functions to accompany the index implementation and make it
22# more versatile
33# NOTE: Autodoc hates it if this is a docstring
4- from git . types import PathLike
4+
55from io import BytesIO
66import os
77from stat import (
1313 S_IFREG ,
1414)
1515import subprocess
16- from typing import List , Tuple , Union , cast
1716
1817from git .cmd import PROC_CREATIONFLAGS , handle_process_output
1918from git .compat import (
4948 unpack
5049)
5150
51+ # typing -----------------------------------------------------------------------------
52+
53+ from typing import (Dict , IO , List , Sequence , TYPE_CHECKING , Tuple , Type , Union , cast )
54+
55+ from git .types import PathLike
56+
57+ if TYPE_CHECKING :
58+ from .base import IndexFile
59+
60+ # ------------------------------------------------------------------------------------
61+
5262
5363S_IFGITLINK = S_IFLNK | S_IFDIR # a submodule
5464CE_NAMEMASK_INV = ~ CE_NAMEMASK
5767 'stat_mode_to_index_mode' , 'S_IFGITLINK' , 'run_commit_hook' , 'hook_path' )
5868
5969
def hook_path(name: str, git_dir: PathLike) -> str:
    """:return: path to the given named hook in the given git repository directory"""
    hooks_dir = osp.join(git_dir, 'hooks')
    return osp.join(hooks_dir, name)
6373
6474
def run_commit_hook(name: str, index: 'IndexFile', *args: str) -> None:
    """Run the commit hook of the given name. Silently ignores hooks that do not exist.

    :param name: name of hook, like 'pre-commit'
    :param index: IndexFile instance
    :param args: arguments passed to hook file
    :raises HookExecutionError: if the hook could not be started, or exited non-zero"""
    # NOTE: 'IndexFile' must stay a string forward reference — it is imported only
    # under TYPE_CHECKING, so an unquoted annotation raises NameError at import time.
    hp = hook_path(name, index.repo.git_dir)
    # Missing or non-executable hooks are silently skipped, matching git's behaviour.
    if not os.access(hp, os.X_OK):
        return None

    env = os.environ.copy()
    env['GIT_INDEX_FILE'] = safe_decode(str(index.path))
    env['GIT_EDITOR'] = ':'
    try:
        # NOTE(review): the Popen keyword arguments were outside the visible diff
        # hunk and are reconstructed here — confirm against the full file.
        cmd = subprocess.Popen([hp] + list(args),
                               env=env,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               cwd=index.repo.working_dir,
                               creationflags=PROC_CREATIONFLAGS,)
    except Exception as ex:
        raise HookExecutionError(hp, ex) from ex
    else:
        stdout_list = []  # type: List[str]
        stderr_list = []  # type: List[str]
        handle_process_output(cmd, stdout_list.append, stderr_list.append, finalize_process)
        # BUG FIX: stdout must be joined from stdout_list — the previous code joined
        # stderr_list twice, so the hook's stdout was silently replaced by its stderr.
        stdout_str = ''.join(stdout_list)
        stderr_str = ''.join(stderr_list)
        if cmd.returncode != 0:
            stdout = force_text(stdout_str, defenc)
            stderr = force_text(stderr_str, defenc)
            raise HookExecutionError(hp, cmd.returncode, stderr, stdout)
    # end handle return code
99109
@@ -108,7 +118,9 @@ def stat_mode_to_index_mode(mode):
108118 return S_IFREG | 0o644 | (mode & 0o111 ) # blobs with or without executable bit
109119
110120
111- def write_cache (entries , stream , extension_data = None , ShaStreamCls = IndexFileSHA1Writer ):
121+ def write_cache (entries : Sequence [Union [BaseIndexEntry , 'IndexEntry' ]], stream : IO [bytes ],
122+ extension_data : Union [None , bytes ] = None ,
123+ ShaStreamCls : Type [IndexFileSHA1Writer ] = IndexFileSHA1Writer ) -> None :
112124 """Write the cache represented by entries to a stream
113125
114126 :param entries: **sorted** list of entries
@@ -121,10 +133,10 @@ def write_cache(entries, stream, extension_data=None, ShaStreamCls=IndexFileSHA1
121133 :param extension_data: any kind of data to write as a trailer, it must begin
122134 a 4 byte identifier, followed by its size ( 4 bytes )"""
123135 # wrap the stream into a compatible writer
124- stream = ShaStreamCls (stream )
136+ stream_sha = ShaStreamCls (stream )
125137
126- tell = stream .tell
127- write = stream .write
138+ tell = stream_sha .tell
139+ write = stream_sha .write
128140
129141 # header
130142 version = 2
@@ -136,8 +148,8 @@ def write_cache(entries, stream, extension_data=None, ShaStreamCls=IndexFileSHA1
136148 beginoffset = tell ()
137149 write (entry [4 ]) # ctime
138150 write (entry [5 ]) # mtime
139- path = entry [3 ]
140- path = force_bytes (path , encoding = defenc )
151+ path_str = entry [3 ] # type: str
152+ path = force_bytes (path_str , encoding = defenc )
141153 plen = len (path ) & CE_NAMEMASK # path length
142154 assert plen == len (path ), "Path %s too long to fit into index" % entry [3 ]
143155 flags = plen | (entry [2 ] & CE_NAMEMASK_INV ) # clear possible previous values
@@ -150,18 +162,19 @@ def write_cache(entries, stream, extension_data=None, ShaStreamCls=IndexFileSHA1
150162
151163 # write previously cached extensions data
152164 if extension_data is not None :
153- stream .write (extension_data )
165+ stream_sha .write (extension_data )
154166
155167 # write the sha over the content
156- stream .write_sha ()
168+ stream_sha .write_sha ()
157169
158170
def read_header(stream: IO[bytes]) -> Tuple[int, int]:
    """Return tuple(version_long, num_entries) from the given stream

    :param stream: binary stream positioned at the start of an index file
    :raises AssertionError: if the 4-byte signature is not b"DIRC" or the
        version is unsupported"""
    type_id = stream.read(4)
    if type_id != b"DIRC":
        raise AssertionError("Invalid index file header: %r" % type_id)
    unpacked = cast(Tuple[int, int], unpack(">LL", stream.read(4 * 2)))
    version, num_entries = unpacked

    # TODO: handle version 3: extended data, see read-cache.c
    # Explicit raise instead of a bare `assert`: validation of on-disk input must
    # survive python -O, and this mirrors the signature check above.
    if version not in (1, 2):
        raise AssertionError("Unsupported index version: %d" % version)
    return version, num_entries
@@ -180,7 +193,7 @@ def entry_key(*entry: Union[BaseIndexEntry, PathLike, int]) -> Tuple[PathLike, i
180193 # END handle entry
181194
182195
183- def read_cache (stream ) :
196+ def read_cache (stream : IO [ bytes ]) -> Tuple [ int , Dict [ Tuple [ PathLike , int ], 'IndexEntry' ], bytes , bytes ] :
184197 """Read a cache file from the given stream
185198 :return: tuple(version, entries_dict, extension_data, content_sha)
186199 * version is the integer version number
@@ -189,7 +202,7 @@ def read_cache(stream):
189202 * content_sha is a 20 byte sha on all cache file contents"""
190203 version , num_entries = read_header (stream )
191204 count = 0
192- entries = {}
205+ entries = {} # type: Dict[Tuple[PathLike, int], 'IndexEntry']
193206
194207 read = stream .read
195208 tell = stream .tell
@@ -228,7 +241,8 @@ def read_cache(stream):
228241 return (version , entries , extension_data , content_sha )
229242
230243
231- def write_tree_from_cache (entries , odb , sl , si = 0 ):
244+ def write_tree_from_cache (entries : List [IndexEntry ], odb , sl : slice , si : int = 0
245+ ) -> Tuple [bytes , List [Tuple [str , int , str ]]]:
232246 """Create a tree from the given sorted list of entries and put the respective
233247 trees into the given object database
234248
@@ -238,7 +252,7 @@ def write_tree_from_cache(entries, odb, sl, si=0):
238252 :param sl: slice indicating the range we should process on the entries list
239253 :return: tuple(binsha, list(tree_entry, ...)) a tuple of a sha and a list of
240254 tree entries being a tuple of hexsha, mode, name"""
241- tree_items = []
255+ tree_items = [] # type: List[Tuple[Union[bytes, str], int, str]]
242256 tree_items_append = tree_items .append
243257 ci = sl .start
244258 end = sl .stop
@@ -277,18 +291,19 @@ def write_tree_from_cache(entries, odb, sl, si=0):
277291
278292 # finally create the tree
279293 sio = BytesIO ()
280- tree_to_stream (tree_items , sio .write )
294+ tree_to_stream (tree_items , sio .write ) # converts bytes of each item[0] to str
295+ tree_items_stringified = cast (List [Tuple [str , int , str ]], tree_items ) # type: List[Tuple[str, int, str]]
281296 sio .seek (0 )
282297
283298 istream = odb .store (IStream (str_tree_type , len (sio .getvalue ()), sio ))
284- return (istream .binsha , tree_items )
299+ return (istream .binsha , tree_items_stringified )
285300
286301
def _tree_entry_to_baseindexentry(tree_entry: Tuple[str, int, str], stage: int) -> BaseIndexEntry:
    """Build a BaseIndexEntry from a (hexsha, mode, name) tree entry at the given stage."""
    hexsha, mode, name = tree_entry
    return BaseIndexEntry((mode, hexsha, stage << CE_STAGESHIFT, name))
289304
290305
291- def aggressive_tree_merge (odb , tree_shas ) -> List [BaseIndexEntry ]:
306+ def aggressive_tree_merge (odb , tree_shas : Sequence [ bytes ] ) -> List [BaseIndexEntry ]:
292307 """
293308 :return: list of BaseIndexEntries representing the aggressive merge of the given
294309 trees. All valid entries are on stage 0, whereas the conflicting ones are left
0 commit comments